Python source code examples: syntaxnet.util.check.Gt()
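
check.Gt(lhs, rhs, message) asserts that lhs is strictly greater than rhs. On failure it raises ValueError carrying the given message, or another exception type when one is passed as the optional fourth argument (the test in Example 6 below exercises both paths). A minimal usage sketch, assuming the import path given in the title above:

from syntaxnet.util import check

check.Gt(2, 1, 'lhs must exceed rhs')  # passes: 2 > 1

try:
  check.Gt(1, 1, 'lhs must exceed rhs')  # fails: the comparison is strict
except ValueError as e:
  print(e)

try:
  check.Gt(-1, 1, 'negative size', RuntimeError)  # custom exception type
except RuntimeError as e:
  print(e)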

Example 1
def create_array(self, stride):
    """Creates a new tensor array to store this layer's activations.

    Arguments:
      stride: Possibly dynamic batch * beam size with which to initialize the
        tensor array

    Returns:
      TensorArray object
    """
    check.Gt(self.dim, 0, 'Cannot create array when dimension is dynamic')
    tensor_array = ta.TensorArray(dtype=tf.float32,
                                  size=0,
                                  dynamic_size=True,
                                  clear_after_read=False,
                                  infer_shape=False,
                                  name='%s_array' % self.name)

    # Start each array with all zeros. Special values will still be learned via
    # the extra embedding dimension stored for each linked feature channel.
    initial_value = tf.zeros([stride, self.dim])
    return tensor_array.write(0, initial_value) 
Example 2
def __init__(self, component):
    """Initializes weights and layers.

    Args:
      component: Parent ComponentBuilderBase object.
    """
    super(SplitNetwork, self).__init__(component)

    parameters = component.spec.network_unit.parameters
    self._num_slices = int(parameters['num_slices'])
    check.Gt(self._num_slices, 0, 'Invalid number of slices.')
    check.Eq(self._concatenated_input_dim % self._num_slices, 0,
             'Input dimension %s does not evenly divide into %s slices' %
             (self._concatenated_input_dim, self._num_slices))
    self._slice_dim = int(self._concatenated_input_dim / self._num_slices)

    for slice_index in xrange(self._num_slices):
      self._layers.append(
          Layer(self, 'slice_%s' % slice_index, self._slice_dim)) 
Example 3
def __init__(self, component):
    """Initializes weights and layers.

    Args:
      component: Parent ComponentBuilderBase object.
    """
    super(SplitNetwork, self).__init__(component)

    parameters = component.spec.network_unit.parameters
    self._num_slices = int(parameters['num_slices'])
    check.Gt(self._num_slices, 0, 'Invalid number of slices.')
    check.Eq(self._concatenated_input_dim % self._num_slices, 0,
             'Input dimension %s does not evenly divide into %s slices' %
             (self._concatenated_input_dim, self._num_slices))
    self._slice_dim = int(self._concatenated_input_dim / self._num_slices)

    for slice_index in xrange(self._num_slices):
      self._layers.append(
          Layer(component, 'slice_%s' % slice_index, self._slice_dim)) 
Example 4
def fixed_feature_lookup(component, state, channel_id, stride):
  """Looks up fixed features and passes them through embeddings.

  Embedding vectors may be scaled by weights if the features specify it.

  Args:
    component: Component object in which to look up the fixed features.
    state: MasterState object for the live nlp_saft::dragnn::MasterState.
    channel_id: int id of the fixed feature to look up.
    stride: int Tensor of current batch * beam size.

  Returns:
    NamedTensor object containing the embedding vectors.
  """
  feature_spec = component.spec.fixed_feature[channel_id]
  check.Gt(feature_spec.embedding_dim, 0,
           'Embeddings requested for non-embedded feature: %s' % feature_spec)
  embedding_matrix = component.get_variable(fixed_embeddings_name(channel_id))

  with tf.op_scope([embedding_matrix], 'fixed_embedding_' + feature_spec.name):
    indices, ids, weights = dragnn_ops.extract_fixed_features(
        state.handle, component=component.name, channel_id=channel_id)
    size = stride * feature_spec.size
    embeddings = embedding_lookup(embedding_matrix, indices, ids, weights, size)
    dim = feature_spec.size * feature_spec.embedding_dim
    return NamedTensor(
        tf.reshape(embeddings, [-1, dim]), feature_spec.name, dim=dim) 
Example 5
def _validate_embedded_fixed_features(comp):
  """Checks that the embedded fixed features of |comp| are set up properly."""
  for feature in comp.spec.fixed_feature:
    check.Gt(feature.embedding_dim, 0,
             'Embeddings requested for non-embedded feature: %s' % feature)
    if feature.is_constant:
      check.IsTrue(feature.HasField('pretrained_embedding_matrix'),
                   'Constant embeddings must be pretrained: %s' % feature) 
Example 6
def testCheckGt(self):
    check.Gt(2, 1, 'foo')
    with self.assertRaisesRegexp(ValueError, 'bar'):
      check.Gt(1, 1, 'bar')
    with self.assertRaisesRegexp(RuntimeError, 'baz'):
      check.Gt(-1, 1, 'baz', RuntimeError) 
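
Taken together, the calls above pin down the contract: the check passes only when the first argument is strictly greater than the second, and the optional fourth argument overrides the exception type. A minimal reimplementation sketch of that inferred behavior follows; it is an illustration, not the actual syntaxnet.util.check source, and the parameter name error_type is assumed:

def Gt(lhs, rhs, message, error_type=ValueError):
  # Sketch of the inferred contract; the real source's message wording may differ.
  if not lhs > rhs:
    raise error_type('Expected (%r) > (%r): %s' % (lhs, rhs, message))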