Python source code examples: tensorflow.python.ops.state_ops.assign_add()

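Before the examples, a quick sketch of the op's semantics may help: state_ops.assign_add(ref, value) adds value to the variable ref in place and returns a tensor that holds the updated value, so downstream ops that consume the result see the increment. Below is a minimal TF 1.x graph-mode illustration (the variable name "counter" is just for this sketch):

import tensorflow as tf  # TF 1.x graph mode assumed
from tensorflow.python.ops import state_ops

v = tf.Variable(0, name="counter")
inc_v = state_ops.assign_add(v, 1)  # a tensor that holds the updated value

with tf.Session() as sess:
    sess.run(v.initializer)
    print(sess.run(inc_v))  # 1
    print(sess.run(inc_v))  # 2
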
Example 1
def _update_t_cur_eta_t_v2(self, lr_t=None, var=None):  # tf.keras
    t_cur_update, eta_t_update = None, None  # in case not assigned

    # update `t_cur` if iterating last `(grad, var)`
    iteration_done = self._updates_processed == (self._updates_per_iter - 1)
    if iteration_done:
        t_cur_update = state_ops.assign_add(self.t_cur, 1,
                                            use_locking=self._use_locking)
        self._updates_processed = 0  # reset
    else:
        self._updates_processed += 1

    # Cosine annealing
    if self.use_cosine_annealing and iteration_done:
        # ensure eta_t is updated AFTER t_cur
        with ops.control_dependencies([t_cur_update]):
            eta_t_update = state_ops.assign(self.eta_t, _compute_eta_t(self),
                                            use_locking=self._use_locking)
        self.lr_t = lr_t * self.eta_t  # for external tracking

    return iteration_done, t_cur_update, eta_t_update 
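A note on the control_dependencies block above: in graph mode, two stateful ops have no guaranteed execution order unless a dependency is declared explicitly, so the eta_t assignment is forced to run only after t_cur has been incremented.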
Example 2
def _model_builder(self):
    """Creates a model function."""

    def _model_fn(features, labels, mode):
      """Model function."""
      assert labels is None, labels
      (all_scores, model_predictions, losses, training_op) = gmm_ops.gmm(
          self._parse_tensor_or_dict(features), self._training_initial_clusters,
          self._num_clusters, self._random_seed, self._covariance_type,
          self._params)
      incr_step = state_ops.assign_add(variables.get_global_step(), 1)
      loss = math_ops.reduce_sum(losses)
      training_op = with_dependencies([training_op, incr_step], loss)
      predictions = {
          GMM.ALL_SCORES: all_scores[0],
          GMM.ASSIGNMENTS: model_predictions[0][0],
      }
      eval_metric_ops = {
          GMM.SCORES: _streaming_sum(loss),
      }
      return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions,
                                     eval_metric_ops=eval_metric_ops,
                                     loss=loss, train_op=training_op)

    return _model_fn 
Example 3
def setUp(self):
    self._tmp_dir = tempfile.mktemp()

    self.v = variables.Variable(10.0, name="v")
    self.delta = constant_op.constant(1.0, name="delta")
    self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")

    self.ph = array_ops.placeholder(dtypes.float32, name="ph")
    self.xph = array_ops.transpose(self.ph, name="xph")
    self.m = constant_op.constant(
        [[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]], dtype=dtypes.float32, name="m")
    self.y = math_ops.matmul(self.m, self.xph, name="y")

    self.sess = session.Session()

    # Initialize variable.
    self.sess.run(self.v.initializer) 
Example 4
def testRunsUnderDebugMode(self):
    # Test command sequence: run; run; run;
    wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
        [[], [], []], self.sess, dump_root=self._tmp_dir)

    # run under debug mode twice.
    wrapped_sess.run(self.inc_v)
    wrapped_sess.run(self.inc_v)

    # Verify that the assign_add op did take effect.
    self.assertAllClose(12.0, self.sess.run(self.v))

    # Assert correct run call numbers for which the CLI has been launched at
    # run-start and run-end.
    self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
    self.assertEqual([1, 2], wrapped_sess.observers["run_end_cli_run_numbers"])

    # Verify that the dumps have been generated and picked up during run-end.
    self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))

    # Verify that the TensorFlow runtime errors are picked up and in this case,
    # they should be both None.
    self.assertEqual([None, None], wrapped_sess.observers["tf_errors"]) 
Example 5
def _increase_eval_step_op(iterations_per_loop):
  """Returns an op to increase the eval step for TPU evaluation.

  Args:
    iterations_per_loop: Tensor. The number of eval steps running in TPU system
      before returning to CPU host for each `Session.run`.

  Returns:
    An operation
  """
  eval_step = evaluation._get_or_create_eval_step()  # pylint: disable=protected-access
  # Estimator's evaluate loop already increments the eval step by 1 per run,
  # so only the remaining difference is added here.
  return state_ops.assign_add(
      eval_step,
      math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
      use_locking=True) 
Example 6
def testDecay(self):
    initial_lr = 0.1
    k = 10
    decay_rate = 0.96
    step = gen_state_ops._variable(shape=[], dtype=dtypes.int32,
                                   name="step", container="", shared_name="")
    assign_step = state_ops.assign(step, 0)
    increment_step = state_ops.assign_add(step, 1)
    decayed_lr = learning_rate_decay.natural_exp_decay(initial_lr, step,
                                                       k, decay_rate)
    with self.test_session():
      assign_step.op.run()
      for i in range(k+1):
        expected = initial_lr * math.exp(-i / k * decay_rate)
        self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
        increment_step.op.run() 
Example 7
def testStaircase(self):
    initial_lr = 0.1
    k = 10
    decay_rate = 0.96
    step = gen_state_ops._variable(shape=[], dtype=dtypes.int32,
                                   name="step", container="", shared_name="")
    assign_step = state_ops.assign(step, 0)
    increment_step = state_ops.assign_add(step, 1)
    decayed_lr = learning_rate_decay.natural_exp_decay(initial_lr,
                                                       step,
                                                       k,
                                                       decay_rate,
                                                       staircase=True)
    with self.test_session():
      assign_step.op.run()
      for i in range(k+1):
        expected = initial_lr * math.exp(-decay_rate * (i // k))
        self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
        increment_step.op.run() 
Example 8
def testDecay(self):
    initial_lr = 0.1
    k = 10
    decay_rate = 0.96
    step = gen_state_ops._variable(shape=[], dtype=dtypes.int32,
                                   name="step", container="", shared_name="")
    assign_step = state_ops.assign(step, 0)
    increment_step = state_ops.assign_add(step, 1)
    decayed_lr = learning_rate_decay.inverse_time_decay(initial_lr,
                                                        step,
                                                        k,
                                                        decay_rate)
    with self.test_session():
      assign_step.op.run()
      for i in range(k+1):
        expected = initial_lr / (1 + i / k * decay_rate)
        self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
        increment_step.op.run() 
Example 9
def testStaircase(self):
    initial_lr = 0.1
    k = 10
    decay_rate = 0.96
    step = gen_state_ops._variable(shape=[], dtype=dtypes.int32,
                                   name="step", container="", shared_name="")
    assign_step = state_ops.assign(step, 0)
    increment_step = state_ops.assign_add(step, 1)
    decayed_lr = learning_rate_decay.inverse_time_decay(initial_lr,
                                                        step,
                                                        k,
                                                        decay_rate,
                                                        staircase=True)
    with self.test_session():
      assign_step.op.run()
      for i in range(k+1):
        expected = initial_lr / (1 + decay_rate * (i // k))
        self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
        increment_step.op.run() 
Example 10
def _get_train_ops(self, features, labels):
    """See base class."""

    features = self._get_feature_dict(features)
    features, labels = self._feature_engineering_fn(features, labels)
    logits = self._logits(features, is_training=True)

    def _make_training_op(training_loss):
      global_step = contrib_variables.get_global_step()
      assert global_step

      linear_train_step = self._linear_model.get_train_step(training_loss)
      dnn_train_step = (self._dnn_model.get_train_step(training_loss) if
                        self._dnn_model else [])
      with ops.control_dependencies(linear_train_step + dnn_train_step):
        with ops.get_default_graph().colocate_with(global_step):
          return state_ops.assign_add(global_step, 1).op

    model_fn_ops = self._head.head_ops(features, labels,
                                       estimator.ModeKeys.TRAIN,
                                       _make_training_op,
                                       logits=logits)
    return model_fn_ops.training_op, model_fn_ops.loss 
Example 11
def _update_confusion_matrix(pred_begin, pred_end, gold_begin, gold_end):
  """Updates internal variables of the confusion matrix."""
  with ops.name_scope("UpdateConfusionMatrix"):
    total_true_pos = metrics_impl.metric_variable([],
                                                  dtypes.int32,
                                                  name="total_true_pos")
    total_false_pos = metrics_impl.metric_variable([],
                                                   dtypes.int32,
                                                   name="total_false_pos")
    total_false_neg = metrics_impl.metric_variable([],
                                                   dtypes.int32,
                                                   name="total_false_neg")

    num_gold = ragged_array_ops.size(gold_begin)
    num_pred = ragged_array_ops.size(pred_begin)
    tp = calculate_true_positive(pred_begin, pred_end, gold_begin, gold_end)
    fp = num_pred - tp
    fn = num_gold - tp
    tp_op = state_ops.assign_add(total_true_pos, tp)
    fp_op = state_ops.assign_add(total_false_pos, fp)
    fn_op = state_ops.assign_add(total_false_neg, fn)
    return (total_true_pos, total_false_pos,
            total_false_neg), control_flow_ops.group(tp_op, fp_op, fn_op) 
Example 12
def update_state(self, prediction_begin, prediction_end, label_begin,
                   label_end):
    """Updates metric given prediction and labelled spans.

    Args:
      prediction_begin: A `RaggedTensor` w/ `ragged_rank`=1 of type int64. This
        contains the starting positions of the predicted spans.
      prediction_end: A `RaggedTensor` w/ `ragged_rank`=1 of type int64. This
        contains the ending positions of the predicted spans.
      label_begin: A `RaggedTensor` w/ `ragged_rank`=1 of type int64. This
        contains the starting positions of the golden labelled spans.
      label_end: A `RaggedTensor` w/ `ragged_rank`=1 of type int64. This
        contains the ending positions of the golden labelled spans.
    """
    tp = math_ops.cast(
        calculate_true_positive(prediction_begin, prediction_end, label_begin,
                                label_end), dtypes.float32)
    num_pred = math_ops.cast(
        ragged_array_ops.size(prediction_begin), dtypes.float32)
    num_gold = math_ops.cast(ragged_array_ops.size(label_begin), dtypes.float32)
    fp = num_pred - tp
    fn = num_gold - tp
    self.true_positive.assign_add(tp)
    self.false_positive.assign_add(fp)
    self.false_negative.assign_add(fn) 
Example 13
def _IncrementCounter(self, counter):
    return state_ops.assign_add(counter, 1, use_locking=True) 
Example 14
def _update_t_cur_eta_t(self):  # keras
    self.updates.append(state_ops.assign_add(self.t_cur, 1))
    # Cosine annealing
    if self.use_cosine_annealing:
        # ensure eta_t is updated AFTER t_cur
        with ops.control_dependencies([self.updates[-1]]):
            self.updates.append(state_ops.assign(self.eta_t,
                                                 _compute_eta_t(self))) 
Example 15
def _apply_dense(self, grad, var):
    # Calculates the preconditioner statistics for each tensor.
    partitioned_grads = TensorPartitioner.partition_tensor(
        grad, self._partition_info)
    shape = var.get_shape()
    fallback_to_diagonal = self._fallback_to_diagonal_for_shape(shape)

    precond_statistics_update = []
    if not fallback_to_diagonal:
      precond_statistics_update = self._updated_statistics(
          var, partitioned_grads)

    accumulator = self.get_slot(var, "accumulator")
    accumulator_updated = state_ops.assign_add(accumulator, grad * grad)
    accumulator_inv_sqrt = math_ops.rsqrt(accumulator_updated + 1e-30)
    if self._momentum > 0.0:
      scaled_g = (1.0 - self._momentum_tensor) * (grad * accumulator_inv_sqrt)
      gbar = self.get_slot(var, "momentum")
      gbar_updated = state_ops.assign_add(
          gbar,
          gbar * (self._momentum_tensor - 1.0) + scaled_g)
    else:
      gbar_updated = (grad * accumulator_inv_sqrt)

    if not fallback_to_diagonal:
      # Update the preconditioner statistics followed by computing the
      # preconditioned gradient.
      with ops.control_dependencies(precond_statistics_update):
        s = tf.cast(self._run_nondiagonal_update, tf.float32)
        preconditioned_grad = self._preconditioned_update(
            var, partitioned_grads, gbar_updated)
        # slowly adapt from diagonal to preconditioned gradient.
        w = self._run_nondiagonal_update_warmup
        warmup_update = s * self._learning_rate_tensor * (
            w * preconditioned_grad + (1.0 - w) * gbar_updated)
        fallback_update = (1 - s) * (self._learning_rate_tensor * gbar_updated)
        return state_ops.assign_sub(var, warmup_update + fallback_update)
    else:
      return state_ops.assign_sub(var,
                                  self._learning_rate_tensor * gbar_updated) 
Example 16
def streaming_tp_fp_arrays(num_gbboxes, tp, fp, 
                           metrics_collections=None,
                           updates_collections=None,
                           name=None):
    """Streaming computation of True and False Positive arrays. 
    """
    with variable_scope.variable_scope(name, 'streaming_tp_fp',
                                       [num_gbboxes, tp, fp]):
        num_gbboxes = tf.cast(num_gbboxes, tf.int32)
        tp = tf.cast(tp, tf.bool)
        fp = tf.cast(fp, tf.bool)
        # Reshape TP and FP tensors and clean away 0 class values.
        tp = tf.reshape(tp, [-1])
        fp = tf.reshape(fp, [-1])

        # Local variables accumulating information over batches.
        v_num_objects = _create_local('v_num_gbboxes', shape=[], dtype=tf.int32)
        v_tp = _create_local('v_tp', shape=[0, ], dtype=tf.bool)
        v_fp = _create_local('v_fp', shape=[0, ], dtype=tf.bool)

        # Update operations.
        num_objects_op = state_ops.assign_add(v_num_objects,
                                              tf.reduce_sum(num_gbboxes))
        tp_op = state_ops.assign(v_tp, tf.concat([v_tp, tp], axis=0),
                                 validate_shape=False)
        fp_op = state_ops.assign(v_fp, tf.concat([v_fp, fp], axis=0),
                                 validate_shape=False)

        # Value and update ops.
        val = (v_num_objects, v_tp, v_fp)
        with ops.control_dependencies([num_objects_op, tp_op, fp_op]):
            update_op = (num_objects_op, tp_op, fp_op)

        return val, update_op 
Example 17
def _streaming_sum(scalar_tensor):
  """Create a sum metric and update op."""
  sum_metric = framework.local_variable(constant_op.constant(0.0))
  sum_update = sum_metric.assign_add(scalar_tensor)
  return sum_metric, sum_update 
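The value-and-update pair returned here follows the usual streaming-metric contract: the local variable accumulates the sum across Session.run calls, while the returned update op is what each evaluation step actually runs.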
Example 18
def update_add(x, increment):
  return state_ops.assign_add(x, increment) 
Example 19
def _kmeans_clustering_model_fn(features, labels, mode, params, config):
  """Model function for KMeansClustering estimator."""
  assert labels is None, labels
  (all_scores, model_predictions, losses,
   is_initialized, init_op, training_op) = clustering_ops.KMeans(
       _parse_tensor_or_dict(features),
       params.get('num_clusters'),
       initial_clusters=params.get('training_initial_clusters'),
       distance_metric=params.get('distance_metric'),
       use_mini_batch=params.get('use_mini_batch'),
       mini_batch_steps_per_iteration=params.get(
           'mini_batch_steps_per_iteration'),
       random_seed=params.get('random_seed'),
       kmeans_plus_plus_num_retries=params.get(
           'kmeans_plus_plus_num_retries')).training_graph()
  incr_step = state_ops.assign_add(variables.get_global_step(), 1)
  loss = math_ops.reduce_sum(losses, name=KMeansClustering.LOSS_OP_NAME)
  summary.scalar('loss/raw', loss)
  training_op = with_dependencies([training_op, incr_step], loss)
  predictions = {
      KMeansClustering.ALL_SCORES: all_scores[0],
      KMeansClustering.CLUSTER_IDX: model_predictions[0],
  }
  eval_metric_ops = {KMeansClustering.SCORES: loss}
  training_hooks = [_InitializeClustersHook(
      init_op, is_initialized, config.is_chief)]
  relative_tolerance = params.get('relative_tolerance')
  if relative_tolerance is not None:
    training_hooks.append(_LossRelativeChangeHook(relative_tolerance))
  return ModelFnOps(
      mode=mode,
      predictions=predictions,
      eval_metric_ops=eval_metric_ops,
      loss=loss,
      train_op=training_op,
      training_hooks=training_hooks)


# TODO(agarwal,ands): support sharded input. 
Example 20
def setUp(self):
    self.session_root = tempfile.mkdtemp()

    self.v = variables.Variable(10.0, dtype=dtypes.float32, name="v")
    self.delta = constant_op.constant(1.0, dtype=dtypes.float32, name="delta")
    self.eta = constant_op.constant(-1.4, dtype=dtypes.float32, name="eta")
    self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")
    self.dec_v = state_ops.assign_add(self.v, self.eta, name="dec_v")

    self.ph = array_ops.placeholder(dtypes.float32, shape=(), name="ph")
    self.inc_w_ph = state_ops.assign_add(self.v, self.ph, name="inc_w_ph")

    self.sess = session.Session()
    self.sess.run(self.v.initializer) 
Example 21
def assign_add(self, delta, use_locking=False):
    """Adds a value to this variable.

    This is essentially a shortcut for `assign_add(self, delta)`.

    Args:
      delta: A `Tensor`. The value to add to this variable.
      use_locking: If `True`, use locking during the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the addition has completed.
    """
    return state_ops.assign_add(self._variable, delta, use_locking=use_locking) 
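As the body shows, this method form simply delegates to state_ops.assign_add on the wrapped variable, which is why the examples on this page can use either the free function or the Variable.assign_add method interchangeably.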
Example 22
def _get_train_ops(self, features, _):
    (_, _, losses, training_op) = gmm_ops.gmm(
        self._parse_tensor_or_dict(features), self._training_initial_clusters,
        self._num_clusters, self._random_seed, self._covariance_type,
        self._params)
    incr_step = state_ops.assign_add(variables.get_global_step(), 1)
    loss = math_ops.reduce_sum(losses)
    training_op = with_dependencies([training_op, incr_step], loss)
    return training_op, loss 
Example 23
def _get_model_function(self):
    """Creates a model function."""

    def _model_fn(features, labels, mode):
      """Model function."""
      assert labels is None, labels
      (all_scores, model_predictions, losses,
       training_op) = clustering_ops.KMeans(
           self._parse_tensor_or_dict(features),
           self._num_clusters,
           self._training_initial_clusters,
           self._distance_metric,
           self._use_mini_batch,
           random_seed=self._random_seed,
           kmeans_plus_plus_num_retries=self.
           _kmeans_plus_plus_num_retries).training_graph()
      incr_step = state_ops.assign_add(variables.get_global_step(), 1)
      loss = math_ops.reduce_sum(losses, name=KMeansClustering.LOSS_OP_NAME)
      logging_ops.scalar_summary('loss/raw', loss)
      training_op = with_dependencies([training_op, incr_step], loss)
      predictions = {
          KMeansClustering.ALL_SCORES: all_scores[0],
          KMeansClustering.CLUSTER_IDX: model_predictions[0],
      }
      eval_metric_ops = {KMeansClustering.SCORES: loss,}
      if self._relative_tolerance is not None:
        training_hooks = [self.LossRelativeChangeHook(self._relative_tolerance)]
      else:
        training_hooks = None
      return ModelFnOps(
          mode=mode,
          predictions=predictions,
          eval_metric_ops=eval_metric_ops,
          loss=loss,
          train_op=training_op,
          training_hooks=training_hooks)

    return _model_fn 
Example 24
def begin(self):
    self._global_step_tensor = training_util.get_global_step()
    if self._global_step_tensor is None:
      raise RuntimeError(
          "Global step should be created to use UpdateGlobalStepHook.")
    ops.get_default_graph()._unsafe_unfinalize()
    self._updated_global_step = state_ops.assign_add(
        self._global_step_tensor, 1, use_locking=True)