Python source code examples: syntaxnet.util.check.NotNone()
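The examples below show how syntaxnet.util.check.NotNone() is used in the SyntaxNet/DRAGNN code base. As the unit test in Example 4 demonstrates, check.NotNone(value, message) is a no-op when value is anything other than None (including falsy values such as []), raises ValueError with the given message when value is None, and accepts an optional error class as a third argument. A minimal sketch consistent with that behavior (an illustration only, not the actual source of syntaxnet/util/check.py; the parameter name 'error' is assumed):

def NotNone(value, message, error=ValueError):
  # Illustrative re-implementation: raise |error| with |message| if |value| is None.
  if value is None:
    raise error(message)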
Example 1
def __init__(self, tensor=None, array=None, stride=None, dim=None):
  """Creates ops for converting the input to either format.

  If 'tensor' is used, then a conversion from [stride * steps, dim] to
  [steps + 1, stride, dim] is performed for dynamic_tensor reads.

  If 'array' is used, then a conversion from [steps + 1, stride, dim] to
  [stride * steps, dim] is performed for bulk_tensor reads.

  Args:
    tensor: Bulk tensor input.
    array: TensorArray dynamic input.
    stride: stride of bulk tensor. Not used for dynamic.
    dim: dim of bulk tensor. Not used for dynamic.
  """
  if tensor is not None:
    check.IsNone(array, 'Cannot initialize from tensor and array')
    check.NotNone(stride, 'Stride is required for bulk tensor')
    check.NotNone(dim, 'Dim is required for bulk tensor')
    self._bulk_tensor = tensor
    with tf.name_scope('convert_to_dyn'):
      tensor = tf.reshape(tensor, [stride, -1, dim])
      tensor = tf.transpose(tensor, perm=[1, 0, 2])
      pad = tf.zeros([1, stride, dim], dtype=tensor.dtype)
      self._array_tensor = tf.concat([pad, tensor], 0)

  if array is not None:
    check.IsNone(tensor, 'Cannot initialize from both tensor and array')
    with tf.name_scope('convert_to_bulk'):
      self._bulk_tensor = convert_network_state_tensorarray(array)
    with tf.name_scope('convert_to_dyn'):
      self._array_tensor = array.stack()
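To make the conversion described in the docstring concrete, here is a small shape check in plain numpy that mirrors the reshape/transpose/pad sequence above; the values stride=2, steps=3, dim=4 are illustrative, not taken from the original code:

import numpy as np

stride, steps, dim = 2, 3, 4                # illustrative values
bulk = np.zeros([stride * steps, dim])      # bulk format: [stride * steps, dim] = [6, 4]
t = bulk.reshape([stride, -1, dim])         # [2, 3, 4]
t = np.transpose(t, [1, 0, 2])              # [3, 2, 4]
pad = np.zeros([1, stride, dim])            # [1, 2, 4]
dyn = np.concatenate([pad, t], 0)           # dynamic format: [steps + 1, stride, dim] = [4, 2, 4]
assert dyn.shape == (steps + 1, stride, dim)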
Example 2
def __init__(self, component, name, dim):
  check.NotNone(dim, 'Dimension is required')
  self.component = component
  self.name = name
  self.dim = dim
Example 3
def get_variable(self, var_name=None, var_params=None):
  """Returns either the original or averaged version of a given variable.

  If the master.read_from_avg flag is set to True, and the
  ExponentialMovingAverage (EMA) object has been attached, then this will ask
  the EMA object for the given variable.

  This is to allow executing inference from the averaged version of
  parameters.

  Arguments:
    var_name: Name of the variable.
    var_params: tf.Variable for which to retrieve an average.

  Only one of |var_name| or |var_params| needs to be provided. If both are
  provided, |var_params| takes precedence.

  Returns:
    tf.Variable object corresponding to original or averaged version.
  """
  if var_params:
    var_name = var_params.name
  else:
    check.NotNone(var_name, 'specify at least one of var_name or var_params')
    var_params = tf.get_variable(var_name)

  if self.moving_average and self.master.read_from_avg:
    logging.info('Retrieving average for: %s', var_name)
    var_params = self.moving_average.average(var_params)
    assert var_params
  logging.info('Returning: %s', var_params.name)
  return var_params
Example 4
def testCheckNotNone(self):
  check.NotNone(1, 'foo')
  check.NotNone([], 'foo')
  with self.assertRaisesRegexp(ValueError, 'bar'):
    check.NotNone(None, 'bar')
  with self.assertRaisesRegexp(RuntimeError, 'baz'):
    check.NotNone(None, 'baz', RuntimeError)
Example 5
def __init__(self, tensor=None, array=None, stride=None, dim=None):
  """Creates ops for converting the input to either format.

  If 'tensor' is used, then a conversion from [stride * steps, dim] to
  [steps + 1, stride, dim] is performed for dynamic_tensor reads.

  If 'array' is used, then a conversion from [steps + 1, stride, dim] to
  [stride * steps, dim] is performed for bulk_tensor reads.

  Args:
    tensor: Bulk tensor input.
    array: TensorArray dynamic input.
    stride: stride of bulk tensor. Not used for dynamic.
    dim: dim of bulk tensor. Not used for dynamic.
  """
  if tensor is not None:
    check.IsNone(array, 'Cannot initialize from tensor and array')
    check.NotNone(stride, 'Stride is required for bulk tensor')
    check.NotNone(dim, 'Dim is required for bulk tensor')
    self._bulk_tensor = tensor
    if dim >= 0:
      # These operations will fail if |dim| is negative.
      with tf.name_scope('convert_to_dyn'):
        tensor = tf.reshape(tensor, [stride, -1, dim])
        tensor = tf.transpose(tensor, perm=[1, 0, 2])
        pad = tf.zeros([1, stride, dim], dtype=tensor.dtype)
        self._array_tensor = tf.concat([pad, tensor], 0)

  if array is not None:
    check.IsNone(tensor, 'Cannot initialize from both tensor and array')
    with tf.name_scope('convert_to_bulk'):
      self._bulk_tensor = convert_network_state_tensorarray(array)
    with tf.name_scope('convert_to_dyn'):
      self._array_tensor = array.stack()
Example 6
def get_variable(self, var_name=None, var_params=None):
  """Returns either the original or averaged version of a given variable.

  If the master.read_from_avg flag is set to True, and the
  ExponentialMovingAverage (EMA) object has been attached, then this will ask
  the EMA object for the given variable.

  This is to allow executing inference from the averaged version of
  parameters.

  Arguments:
    var_name: Name of the variable.
    var_params: tf.Variable for which to retrieve an average.

  Only one of |var_name| or |var_params| needs to be provided. If both are
  provided, |var_params| takes precedence.

  Returns:
    tf.Variable object corresponding to original or averaged version.
  """
  if var_params is not None:
    var_name = var_params.name
  else:
    check.NotNone(var_name, 'specify at least one of var_name or var_params')
    var_params = tf.get_variable(var_name)

  if self.moving_average and self.master.read_from_avg:
    logging.info('Retrieving average for: %s', var_name)
    var_params = self.moving_average.average(var_params)
    assert var_params
  logging.info('Returning: %s', var_params.name)
  return var_params
Example 7
def create(self,
           fixed_embeddings,
           linked_embeddings,
           context_tensor_arrays,
           attention_tensor,
           during_training,
           stride=None):
  """Requires |stride|; otherwise see base class."""
  check.NotNone(stride,
                'BulkBiLSTMNetwork requires "stride" and must be called '
                'in the bulk feature extractor component.')

  # Flatten the lengths into a vector.
  lengths = dragnn.lookup_named_tensor('lengths', linked_embeddings)
  lengths_s = tf.squeeze(lengths.tensor, [1])

  # Collect all other inputs into a batched tensor.
  linked_embeddings = [
      named_tensor for named_tensor in linked_embeddings
      if named_tensor.name != 'lengths'
  ]
  inputs_sxnxd = dragnn.get_input_tensor_with_stride(
      fixed_embeddings, linked_embeddings, stride)

  # Since get_input_tensor_with_stride() concatenates the input embeddings, it
  # obscures the static activation dimension, which the RNN library requires.
  # Restore it using set_shape(). Note that set_shape() merges into the known
  # shape, so only specify the activation dimension.
  inputs_sxnxd.set_shape(
      [tf.Dimension(None), tf.Dimension(None), self._input_dim])

  initial_states_forward, initial_states_backward = (
      self._create_initial_states(stride))

  if during_training:
    cells_forward = self._train_cells_forward
    cells_backward = self._train_cells_backward
  else:
    cells_forward = self._inference_cells_forward
    cells_backward = self._inference_cells_backward

  def _bilstm_closure(scope):
    """Applies the bi-LSTM to the current inputs."""
    outputs_sxnxd, _, _ = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
        cells_forward,
        cells_backward,
        inputs_sxnxd,
        initial_states_fw=initial_states_forward,
        initial_states_bw=initial_states_backward,
        sequence_length=lengths_s,
        parallel_iterations=self._attrs['parallel_iterations'],
        scope=scope)
    return outputs_sxnxd

  # Layer outputs are not batched; flatten out the batch dimension.
  outputs_sxnxd = self._apply_with_captured_variables(_bilstm_closure)
  outputs_snxd = tf.reshape(outputs_sxnxd, [-1, self._output_dim])
  return self._append_base_layers([outputs_snxd])
Example 8
def maybe_apply_dropout(inputs, keep_prob, per_sequence, stride=None):
  """Applies dropout, if so configured, to an input tensor.

  The input may be rank 2 or 3 depending on whether the stride (i.e., batch
  size) has been incorporated into the shape.

  Args:
    inputs: [stride * num_steps, dim] or [stride, num_steps, dim] input tensor.
    keep_prob: Scalar probability of keeping each input element. If >= 1.0, no
        dropout is performed.
    per_sequence: If true, sample the dropout mask once per sequence, instead
        of once per step. Requires |stride| when true.
    stride: Scalar batch size. Optional if |per_sequence| is false.

  Returns:
    [stride * num_steps, dim] or [stride, num_steps, dim] tensor, matching the
    shape of |inputs|, containing the masked or original inputs, depending on
    whether dropout was actually performed.
  """
  check.Ge(inputs.get_shape().ndims, 2, 'inputs must be rank 2 or 3')
  check.Le(inputs.get_shape().ndims, 3, 'inputs must be rank 2 or 3')
  flat = (inputs.get_shape().ndims == 2)

  if keep_prob >= 1.0:
    return inputs

  if not per_sequence:
    return tf.nn.dropout(inputs, keep_prob)

  check.NotNone(stride, 'per-sequence dropout requires stride')
  dim = inputs.get_shape().as_list()[-1]
  check.NotNone(dim, 'inputs must have static activation dimension, but have '
                'static shape %s' % inputs.get_shape().as_list())

  # If needed, restore the batch dimension to separate the sequences.
  inputs_sxnxd = tf.reshape(inputs, [stride, -1, dim]) if flat else inputs

  # Replace |num_steps| with 1 in |noise_shape|, so the dropout mask broadcasts
  # to all steps for a particular sequence.
  noise_shape = [stride, 1, dim]
  masked_sxnxd = tf.nn.dropout(inputs_sxnxd, keep_prob, noise_shape)

  # If needed, flatten out the batch dimension in the return value.
  return tf.reshape(masked_sxnxd, [-1, dim]) if flat else masked_sxnxd
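A usage sketch for the per-sequence path of maybe_apply_dropout (assumes the function above is in scope and a TF 1.x graph session; the shapes and keep probability are illustrative, not from the original code):

import tensorflow as tf

# Illustrative values only: batch of 2 sequences, 3 steps each, 4-dim activations.
stride, num_steps, dim = 2, 3, 4
flat_inputs = tf.zeros([stride * num_steps, dim])

# One dropout mask is sampled per sequence and broadcast across all its steps,
# so |stride| must be supplied when per_sequence=True.
dropped = maybe_apply_dropout(flat_inputs, keep_prob=0.8,
                              per_sequence=True, stride=stride)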