desc: stringlengths 3 to 26.7k
decl: stringlengths 11 to 7.89k
bodies: stringlengths 8 to 553k
'Test that initializers are not mutated.'
def testInitializerMutation(self):
initializers = {'b': tf.constant_initializer(0)}
initializers_copy = dict(initializers)
conv1 = snt.Conv3D(output_channels=1, kernel_shape=3, stride=1, name='conv1',
                   initializers=initializers)
conv1(tf.placeholder(tf.float32, [1, 10, 10, 10, 2]))
self.assertAllEqual(initializers, initializers_copy)
'Run through for something with a known answer using SAME padding.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testComputationSame(self, use_bias):
conv1 = snt.Conv3D(output_channels=1, kernel_shape=3, stride=1, padding=snt.SAME,
                   name='conv1', use_bias=use_bias,
                   initializers=create_constant_initializers(1.0, 1.0, use_bias))
out = conv1(tf.constant(np.ones([1, 5, 5, 5, 1], dtype=np.float32)))
expected_out = np.asarray(
    [9, 13, 13, 13, 9, 13, 19, 19, 19, 13, 13, 19, 19, 19, 13, 13, 19, 19, 19, 13, 9, 13, 13, 13, 9,
     13, 19, 19, 19, 13, 19, 28, 28, 28, 19, 19, 28, 28, 28, 19, 19, 28, 28, 28, 19, 13, 19, 19, 19, 13,
     13, 19, 19, 19, 13, 19, 28, 28, 28, 19, 19, 28, 28, 28, 19, 19, 28, 28, 28, 19, 13, 19, 19, 19, 13,
     13, 19, 19, 19, 13, 19, 28, 28, 28, 19, 19, 28, 28, 28, 19, 19, 28, 28, 28, 19, 13, 19, 19, 19, 13,
     9, 13, 13, 13, 9, 13, 19, 19, 19, 13, 13, 19, 19, 19, 13, 13, 19, 19, 19, 13, 9, 13, 13, 13, 9]
).reshape((5, 5, 5))
if not use_bias:
  expected_out -= 1
with self.test_session():
  tf.variables_initializer([conv1.w, conv1.b] if use_bias else [conv1.w]).run()
  self.assertAllClose(np.reshape(out.eval(), [5, 5, 5]), expected_out)
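The expected values above follow from counting how many input voxels the all-ones 3x3x3 kernel covers at each output position, plus 1 for the constant bias: a corner output sees 2*2*2 = 8 inputs, an edge 2*2*3 = 12, a face 2*3*3 = 18, and an interior voxel 3*3*3 = 27. A minimal NumPy sketch (illustrative only; the helper name is made up) that reproduces the same table of counts:

import numpy as np

def same_conv3d_ones_counts(size=5, k=3):
  # Sum of an all-ones k*k*k window at every position, with SAME zero padding.
  pad = k // 2
  padded = np.pad(np.ones((size, size, size)), pad, mode='constant')
  out = np.empty((size, size, size))
  for z in range(size):
    for y in range(size):
      for x in range(size):
        out[z, y, x] = padded[z:z + k, y:y + k, x:x + k].sum()
  return out

expected = same_conv3d_ones_counts() + 1.0  # +1 for the constant bias of 1.0
assert expected[0, 0, 0] == 9 and expected[0, 0, 2] == 13 and expected[2, 2, 2] == 28

The VALID-padding test below follows the same arithmetic: every output position sees the full 27-voxel window, giving 28 once the bias is included.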
'Run through for something with a known answer using snt.VALID padding.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testComputationValid(self, use_bias):
conv1 = snt.Conv3D(output_channels=1, kernel_shape=3, stride=1, padding=snt.VALID,
                   name='conv1', use_bias=use_bias,
                   initializers=create_constant_initializers(1.0, 1.0, use_bias))
out = conv1(tf.constant(np.ones([1, 5, 5, 5, 1], dtype=np.float32)))
expected_out = np.asarray([28] * 27).reshape((3, 3, 3))
if not use_bias:
  expected_out -= 1
with self.test_session():
  tf.variables_initializer([conv1.w, conv1.b] if use_bias else [conv1.w]).run()
  self.assertAllClose(np.reshape(out.eval(), [3, 3, 3]), expected_out)
'Sharing is working.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testSharing(self, use_bias):
conv1 = snt.Conv3D(output_channels=1, kernel_shape=3, stride=1, padding=snt.SAME,
                   use_bias=use_bias, name='conv1')
x = np.random.randn(1, 5, 5, 5, 1)
x1 = tf.constant(x, dtype=np.float32)
x2 = tf.constant(x, dtype=np.float32)
out1 = conv1(x1)
out2 = conv1(x2)
with self.test_session():
  tf.variables_initializer([conv1.w, conv1.b] if use_bias else [conv1.w]).run()
  self.assertAllClose(out1.eval(), out2.eval())
  w = np.random.randn(3, 3, 3, 1, 1)
  conv1.w.assign(w).eval()
  self.assertAllClose(out1.eval(), out2.eval())
'Set up some variables to re-use in multiple tests.'
def setUp(self):
super(Conv3DTransposeTest, self).setUp()
self.batch_size = 7
self.in_depth = 7
self.in_height = 7
self.in_width = 11
self.in_channels = 4
self.out_channels = 10
self.kernel_shape_d = 5
self.kernel_shape_h = 5
self.kernel_shape_w = 7
self.stride_d = 1
self.stride_h = 1
self.stride_w = 1
self.padding = snt.SAME
self.in_shape = (self.batch_size, self.in_depth, self.in_height, self.in_width,
                 self.in_channels)
self.out_shape = (self.in_depth, self.in_height, self.in_width)
self.kernel_shape = (self.kernel_shape_d, self.kernel_shape_h, self.kernel_shape_w)
self.kernel_shape2 = (self.kernel_shape_d, self.kernel_shape_h, self.kernel_shape_w,
                      self.out_channels, self.in_channels)
self.strides = (self.stride_d, self.stride_h, self.stride_w)
'Tests if output shapes are valid.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testOutputShapeConsistency(self, use_bias):
inputs = tf.placeholder(tf.float32, shape=self.in_shape)
conv1 = snt.Conv3DTranspose(name='conv3d_1',
                            output_channels=self.out_channels,
                            output_shape=self.out_shape,
                            kernel_shape=self.kernel_shape,
                            padding=self.padding,
                            stride=1,
                            use_bias=use_bias)
outputs = conv1(inputs)
self.assertTrue(outputs.get_shape().is_compatible_with(
    (self.batch_size,) + self.out_shape + (self.out_channels,)))
self.assertTrue(conv1.w.get_shape().is_compatible_with(self.kernel_shape2))
if use_bias:
  self.assertTrue(conv1.b.get_shape().is_compatible_with([self.out_channels]))
'Tests if output shapes are valid when specified as an integer.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testOutputShapeInteger(self, use_bias):
inputs = tf.zeros(shape=[3, 5, 5, 5, 2], dtype=tf.float32)
inputs_2 = tf.zeros(shape=[3, 5, 7, 5, 2], dtype=tf.float32)
conv1 = snt.Conv3DTranspose(name='conv3d_1', output_channels=10, output_shape=10,
                            kernel_shape=5, padding=snt.SAME, stride=2,
                            use_bias=use_bias)
outputs = conv1(inputs)
outputs_2 = conv1(inputs_2)
self.assertTrue(outputs.get_shape().is_compatible_with((3, 10, 10, 10, 10)))
with self.test_session() as sess:
  tf.global_variables_initializer().run()
  sess.run(outputs)
  with self.assertRaises(tf.errors.InvalidArgumentError):
    sess.run(outputs_2)
'Tests if the correct output shapes are set up in the transposed module.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testTransposition(self, use_bias):
net = snt.Conv3DTranspose(name='conv3d_3',
                          output_channels=self.out_channels,
                          output_shape=self.out_shape,
                          kernel_shape=self.kernel_shape,
                          padding=self.padding,
                          stride=1,
                          use_bias=use_bias)
net_transpose = net.transpose()
input_to_net = tf.placeholder(tf.float32, shape=self.in_shape)
err = 'Variables in {} not instantiated yet, __call__ the module first.'
with self.assertRaisesRegexp(snt.NotConnectedError, err.format(net.scope_name)):
  net_transpose(input_to_net)
net_transpose = net.transpose(name='another_net_transpose')
net_out = net(input_to_net)
net_transposed_output = net_transpose(net_out)
self.assertEqual(net_transposed_output.get_shape(), input_to_net.get_shape())
'Check that differing reduction indices give the correct output shape.'
def testReductionIndices(self):
inputs = tf.placeholder(tf.float32, shape=[None, 64, 32, 3])

bn1 = snt.BatchNorm(axis=[0], offset=False)
bn1(inputs, is_training=True)
self.assertEqual(bn1.moving_mean.get_shape(), (1, 64, 32, 3))

bn2 = snt.BatchNorm(axis=[0, 1], offset=False)
bn2(inputs, is_training=True)
self.assertEqual(bn2.moving_mean.get_shape(), (1, 1, 32, 3))

bn3 = snt.BatchNorm(axis=[0, 2], offset=False)
bn3(inputs, is_training=True)
self.assertEqual(bn3.moving_mean.get_shape(), (1, 64, 1, 3))

bn4 = snt.BatchNorm(offset=False)
bn4(inputs, is_training=True)
self.assertEqual(bn4.moving_mean.get_shape(), (1, 1, 1, 3))

err = 'Too many indices specified in axis: len\\(\\[0, 1, 2, 3, 0\\]\\) > len\\(\\(\\?, 64, 32, 3\\)\\)'
with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):
  bn5 = snt.BatchNorm(axis=[0, 1, 2, 3, 0])
  bn5(inputs, is_training=True)

err = 'One or more index in axis is too large for input shape: \\[4\\] >= 4'
with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):
  bn6 = snt.BatchNorm(axis=[4])
  bn6(inputs, is_training=True)

err = 'Indices in axis must be non-negative: \\[-1\\] < 0'
with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):
  bn7 = snt.BatchNorm(axis=[-1])
  bn7(inputs, is_training=True)
'Test that using moving_mean as shift improves statistics.'
def testShiftImproveStatistics(self):
_, _, inputs = self._get_inputs()
bn = snt.BatchNorm(offset=False, scale=False, decay_rate=0.1)
out1 = bn(inputs, is_training=True)
with self.test_session() as sess:
  sess.run(tf.global_variables_initializer())
  out_v = sess.run(out1)
  self.assertAllClose(np.zeros([7, 6]), out_v, rtol=1e-06, atol=1e-05)
  sess.run(tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS)))
  out_v = sess.run(out1)
  self.assertAllClose(np.zeros([7, 6]), out_v, rtol=1e-06, atol=1e-06)
'Tests that the correct statistics are computed when the module is connected twice.

When connected in parallel, the order in which the two updates happen is ill-defined: a double update could happen, or two sequential updates. E.g. if decay_rate is 0.9, the start value is 1.0, and the target value is 0.0, the value could progress as 1.00 -> 0.90 -> 0.81 if the second update uses the fresh second value, or as 1.00 -> 0.90 -> 0.80 if the second update uses the stale first value. We avoid the ambiguity here by issuing the updates in sequential run calls, which makes this test deterministic. The two situations differ only minimally, especially if decay_rate is close to one (e.g. the default of 0.999).

Args:
  dtype: TensorFlow datatype of the input test batch.'
@parameterized.NamedParameters(('Float32', tf.float32), ('Float64', tf.float64))
def testCheckStatsDouble(self, dtype):
v, _, inputs = self._get_inputs(dtype)
bn = snt.BatchNorm(offset=False, scale=False, decay_rate=0.9)
with tf.name_scope('net1'):
  bn(inputs, is_training=True)
with tf.name_scope('net2'):
  bn(inputs, is_training=True)
update_ops_1 = tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS, 'net1'))
self.assertEqual(len(update_ops_1), 2)
update_ops_2 = tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS, 'net2'))
self.assertEqual(len(update_ops_2), 2)
with self.test_session() as sess:
  sess.run(tf.global_variables_initializer())
  mm, mv = sess.run([bn.moving_mean, bn.moving_variance])
  self.assertAllClose(np.zeros([1, 6]), mm)
  self.assertAllClose(np.ones([1, 6]), mv)
  sess.run(update_ops_1)
  sess.run(update_ops_2)
  mm, mv = sess.run([bn.moving_mean, bn.moving_variance])
  correct_mm = (1.0 - bn._decay_rate) * v
  correct_mm = (1.0 - bn._decay_rate) * v + bn._decay_rate * correct_mm
  correct_mv = np.ones([1, 6]) * bn._decay_rate ** 2
  self.assertAllClose(np.reshape(correct_mm, [1, 6]), mm)
  self.assertAllClose(np.reshape(correct_mv, [1, 6]), mv)
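The expected moving statistics come from applying the exponential-moving-average update twice in sequence. A tiny plain-Python sketch of the arithmetic described in the docstring above (decay 0.9, start 1.0, target 0.0; names are illustrative):

def ema(moving, batch_stat, decay):
  # One moving-statistics update: decay * old + (1 - decay) * batch statistic.
  return decay * moving + (1.0 - decay) * batch_stat

# Sequential ("fresh") updates: the second update sees the first result.
fresh = ema(ema(1.0, 0.0, 0.9), 0.0, 0.9)    # 1.00 -> 0.90 -> 0.81
# Parallel ("stale") updates: both deltas are computed from the start value.
stale = 1.0 + 2 * (1.0 - 0.9) * (0.0 - 1.0)  # 1.00 -> 0.80
assert round(fresh, 2) == 0.81 and round(stale, 2) == 0.80

Running the two update collections in separate sess.run calls, as the test does, guarantees the sequential behaviour.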
'The correct normalization is being used for different Python flags.'
def testCheckStatsPython(self):
v, input_v, inputs = self._get_inputs()
bn = snt.BatchNorm(offset=False, scale=False, decay_rate=0.5)
out1 = bn(inputs, is_training=True, test_local_stats=True)
out2 = bn(inputs, is_training=False, test_local_stats=True)
out3 = bn(inputs, is_training=False, test_local_stats=False)
update_ops = tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
self.assertEqual(len(update_ops), 2)
with tf.control_dependencies(update_ops):
  out1 = tf.identity(out1)
with self.test_session() as sess:
  sess.run(tf.global_variables_initializer())
  out_v = sess.run(out1)
  mm, mv = sess.run([bn.moving_mean, bn.moving_variance])
  correct_mm = (1.0 - bn._decay_rate) * v
  correct_mv = np.ones([1, 6]) * bn._decay_rate
  self.assertAllClose(np.reshape(correct_mm, [1, 6]), mm)
  self.assertAllClose(np.reshape(correct_mv, [1, 6]), mv)
  self.assertAllClose(np.zeros([7, 6]), out_v, rtol=1e-06, atol=1e-05)
  out2_, out3_ = sess.run([out2, out3])
  self.assertAllClose(np.zeros([7, 6]), out2_)
  self.assertAllClose((input_v - mm) / np.sqrt(mv + bn._eps), out3_)
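The final assertion exercises the test-time path that normalizes with the moving statistics rather than the local batch statistics; written out, with epsilon the small constant bn._eps:

\hat{x} = \frac{x - \mu_{\mathrm{moving}}}{\sqrt{\sigma^2_{\mathrm{moving}} + \epsilon}}

whereas the is_training=True and test_local_stats=True paths normalize with the mean and variance of the current batch, which is why those outputs are (close to) zero here.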
'The correct normalization is being used for different TF flags.'
@parameterized.NamedParameters(('UseUpdateCollection', tf.GraphKeys.UPDATE_OPS),
                               ('UseDifferentUpdateCollection', 'my_update_ops'),
                               ('UseControlDependencies', None))
def testCheckStatsInGraph(self, update_ops_collection):
v, input_v, inputs = self._get_inputs()
bn = snt.BatchNorm(offset=False, scale=False, decay_rate=0.5,
                   update_ops_collection=update_ops_collection)
is_training = tf.placeholder(tf.bool)
test_local_stats = tf.placeholder(tf.bool)
out = bn(inputs, is_training=is_training, test_local_stats=test_local_stats)
if update_ops_collection is not None:
  update_ops = tuple(tf.get_collection(update_ops_collection))
  self.assertEqual(len(update_ops), 2)
  with tf.control_dependencies(update_ops):
    out = tf.identity(out)
with self.test_session() as sess:
  sess.run(tf.global_variables_initializer())
  out_v = sess.run(out, feed_dict={is_training: True, test_local_stats: True})
  self.assertAllClose(np.zeros([7, 6]), out_v, rtol=1e-06, atol=1e-05)
  ops = (bn.moving_mean, bn.moving_variance)
  mm1, mv1 = sess.run(ops)
  correct_mm = (1.0 - bn._decay_rate) * v
  correct_mv = np.ones([1, 6]) * bn._decay_rate
  self.assertAllClose(np.reshape(correct_mm, [1, 6]), mm1)
  self.assertAllClose(np.reshape(correct_mv, [1, 6]), mv1)
  out_v = sess.run(out, feed_dict={is_training: False, test_local_stats: True})
  mm2, mv2 = sess.run(ops)
  self.assertAllClose(mm1, mm2)
  self.assertAllClose(mv1, mv2)
  self.assertAllClose(np.zeros([7, 6]), out_v)
  out_v = sess.run(out, feed_dict={is_training: False, test_local_stats: False})
  mm3, mv3 = sess.run(ops)
  self.assertAllClose(mm1, mm3)
  self.assertAllClose(mv1, mv3)
  self.assertAllClose((input_v - mm3) / np.sqrt(mv3 + bn._eps), out_v)
'Check that the correct number of variables are made when sharing.'
def testSharing(self):
inputs1 = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
inputs2 = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
bn = snt.BatchNorm(offset=True, scale=True)
bn(inputs1, is_training=True)
bn(inputs2, is_training=False)
self.assertEqual(len(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)), 4)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), 2)
'Demonstrate that updates inside a cond fail.'
def testUpdatesInsideCond(self):
_, input_v, inputs = self._get_inputs()
bn = snt.BatchNorm(offset=False, scale=False, decay_rate=0.5)
condition = tf.placeholder(tf.bool)
cond = tf.cond(condition,
               lambda: bn(inputs, is_training=True),
               lambda: inputs)
init = tf.global_variables_initializer()
with self.test_session() as sess:
  sess.run(init)
  out_v = sess.run(cond, feed_dict={condition: False})
  self.assertAllClose(input_v, out_v)
  out_v = sess.run(cond, feed_dict={condition: True})
  self.assertAllClose(np.zeros([7, 6]), out_v, rtol=0.0001, atol=0.0001)
  mm, mv = sess.run([bn.moving_mean, bn.moving_variance])
  self.assertAllClose(np.zeros([1, 6]), mm)
  self.assertAllClose(np.ones([1, 6]), mv)
  with self.assertRaisesRegexp(ValueError, 'Operation'):
    sess.run(tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS)))
'Check the inputs batch_size can change.'
def testVariableBatchSize(self):
inputs_shape = [10, 10]
inputs = tf.placeholder(tf.float32, shape=[None] + inputs_shape)
bn = snt.BatchNorm(offset=False, scale=False)
out = bn(inputs, is_training=False, test_local_stats=False)
init = tf.global_variables_initializer()
update_ops = tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
with self.test_session() as sess:
  sess.run(init)
  for batch_size in [1, 3, 10]:
    input_data = np.random.rand(batch_size, *inputs_shape)
    out_v = sess.run(out, feed_dict={inputs: input_data})
    self.assertAllClose(input_data / np.sqrt(1.0 + bn._eps), out_v)
    sess.run(update_ops, feed_dict={inputs: input_data})
'See `__init__` of `LSTM` and `BatchNormLSTM` for docs.'
def __init__(self, hidden_size, forget_bias=1.0, initializers=None, partitioners=None, regularizers=None, use_peepholes=False, use_batch_norm_h=False, use_batch_norm_x=False, use_batch_norm_c=False, use_layer_norm=False, max_unique_stats=1, hidden_clip_value=None, cell_clip_value=None, name='lstm'):
super(_BaseLSTM, self).__init__(name=name)
self._hidden_size = hidden_size
self._forget_bias = forget_bias
self._use_peepholes = use_peepholes
self._max_unique_stats = max_unique_stats
self._use_batch_norm_h = use_batch_norm_h
self._use_batch_norm_x = use_batch_norm_x
self._use_batch_norm_c = use_batch_norm_c
self._use_layer_norm = use_layer_norm
self._hidden_clip_value = hidden_clip_value
self._cell_clip_value = cell_clip_value
self.possible_keys = self.get_possible_initializer_keys(
    use_peepholes=use_peepholes, use_batch_norm_h=use_batch_norm_h,
    use_batch_norm_x=use_batch_norm_x, use_batch_norm_c=use_batch_norm_c)
self._initializers = util.check_initializers(initializers, self.possible_keys)
self._partitioners = util.check_partitioners(partitioners, self.possible_keys)
self._regularizers = util.check_regularizers(regularizers, self.possible_keys)
if max_unique_stats < 1:
  raise ValueError('max_unique_stats must be >= 1')
if max_unique_stats != 1 and not (
    use_batch_norm_h or use_batch_norm_x or use_batch_norm_c):
  raise ValueError('max_unique_stats specified but batch norm disabled')
if use_batch_norm_h and use_layer_norm:
  raise ValueError('Only one of use_batch_norm_h and layer_norm is allowed.')
if use_batch_norm_x and use_layer_norm:
  raise ValueError('Only one of use_batch_norm_x and layer_norm is allowed.')
if use_batch_norm_c and use_layer_norm:
  raise ValueError('Only one of use_batch_norm_c and layer_norm is allowed.')
if hidden_clip_value is not None and hidden_clip_value < 0:
  raise ValueError('The value of hidden_clip_value should be nonnegative.')
if cell_clip_value is not None and cell_clip_value < 0:
  raise ValueError('The value of cell_clip_value should be nonnegative.')
if use_batch_norm_h:
  self._batch_norm_h = _BaseLSTM.IndexedStatsBatchNorm(max_unique_stats,
                                                       'batch_norm_h')
if use_batch_norm_x:
  self._batch_norm_x = _BaseLSTM.IndexedStatsBatchNorm(max_unique_stats,
                                                       'batch_norm_x')
if use_batch_norm_c:
  self._batch_norm_c = _BaseLSTM.IndexedStatsBatchNorm(max_unique_stats,
                                                       'batch_norm_c')
'Wraps this RNNCore with the additional control input to the `BatchNorm`s.

Example usage:

  lstm = snt.BatchNormLSTM(4)
  is_training = tf.placeholder(tf.bool)
  rnn_input = ...
  my_rnn = rnn.rnn(lstm.with_batch_norm_control(is_training), rnn_input)

Args:
  is_training: Boolean that indicates whether we are in training mode or testing mode. When in training mode, the batch norm statistics are taken from the given batch, and moving statistics are updated. When in testing mode, the moving statistics are not updated, and in addition if `test_local_stats` is False then the moving statistics are used for the batch statistics. See the `BatchNorm` module for more details.
  test_local_stats: Boolean scalar indicating whether to use local batch statistics in test mode.

Returns:
  RNNCell wrapping this class with the extra input(s) added.'
def with_batch_norm_control(self, is_training, test_local_stats=True):
return _BaseLSTM.CellWithExtraInput(self, is_training=is_training, test_local_stats=test_local_stats)
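A fuller, hedged version of the docstring's example, assuming TF1 graph mode and a Sonnet 1.x-style API; tf.nn.dynamic_rnn is used here in place of the older rnn.rnn helper, and the tensor names and sizes are purely illustrative:

import tensorflow as tf
import sonnet as snt

# A batch of 8 sequences, 20 steps each, 16 features per step (illustrative).
rnn_input = tf.placeholder(tf.float32, [8, 20, 16])
is_training = tf.placeholder(tf.bool)

lstm = snt.BatchNormLSTM(hidden_size=4)
# The returned cell threads is_training through to every internal BatchNorm.
cell = lstm.with_batch_norm_control(is_training, test_local_stats=True)
outputs, final_state = tf.nn.dynamic_rnn(
    cell, rnn_input, initial_state=lstm.initial_state(8))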
'Returns the keys the dictionary of variable initializers may contain.

The set of all possible initializer keys are:
  w_gates:  weight for gates
  b_gates:  bias of gates
  w_f_diag: weight for prev_cell -> forget gate peephole
  w_i_diag: weight for prev_cell -> input gate peephole
  w_o_diag: weight for prev_cell -> output gate peephole
  gamma_h:  batch norm scaling for previous_hidden -> gates
  gamma_x:  batch norm scaling for input -> gates
  gamma_c:  batch norm scaling for cell -> output
  beta_c:   batch norm bias for cell -> output

Args:
  cls: The class.
  use_peepholes: Boolean that indicates whether peephole connections are used.
  use_batch_norm_h: Boolean that indicates whether to apply batch normalization at the previous_hidden -> gates contribution. If you are experimenting with batch norm then this may be the most effective to turn on.
  use_batch_norm_x: Boolean that indicates whether to apply batch normalization at the input -> gates contribution.
  use_batch_norm_c: Boolean that indicates whether to apply batch normalization at the cell -> output contribution.

Returns:
  Set with strings corresponding to the strings that may be passed to the constructor.'
@classmethod
def get_possible_initializer_keys(cls, use_peepholes=False, use_batch_norm_h=False,
                                  use_batch_norm_x=False, use_batch_norm_c=False):
possible_keys = cls.POSSIBLE_INITIALIZER_KEYS.copy()
if not use_peepholes:
  possible_keys.difference_update({cls.W_F_DIAG, cls.W_I_DIAG, cls.W_O_DIAG})
if not use_batch_norm_h:
  possible_keys.remove(cls.GAMMA_H)
if not use_batch_norm_x:
  possible_keys.remove(cls.GAMMA_X)
if not use_batch_norm_c:
  possible_keys.difference_update({cls.GAMMA_C, cls.BETA_C})
return possible_keys
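A small sketch of how these keys are typically queried and then used to build an initializers dict (illustrative, assuming a Sonnet 1.x-style API and the lower-case key strings listed in the docstring above):

import tensorflow as tf
import sonnet as snt

# Ask the class which initializer keys are valid for this configuration.
keys = snt.LSTM.get_possible_initializer_keys(use_peepholes=True)
print(sorted(keys))  # e.g. expected to include 'w_gates', 'b_gates', 'w_f_diag', ...

# Only keys from that set may appear in the initializers dict.
lstm = snt.LSTM(
    hidden_size=32,
    use_peepholes=True,
    initializers={'b_gates': tf.constant_initializer(1.0)})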
'Connects the LSTM module into the graph. If this is not the first time the module has been connected to the graph, the Tensors provided as inputs and state must have the same final dimension, in order for the existing variables to be the correct size for their corresponding multiplications. The batch size may differ for each connection. Args: inputs: Tensor of size `[batch_size, input_size]`. prev_state: Tuple (prev_hidden, prev_cell), or if batch norm is enabled and `max_unique_stats > 1`, then (prev_hidden, prev_cell, time_step). Here, prev_hidden and prev_cell are tensors of size `[batch_size, hidden_size]`, and time_step is used to indicate the current RNN step. is_training: Boolean indicating whether we are in training mode (as opposed to testing mode), passed to the batch norm modules. Note to use this you must wrap the cell via the `with_batch_norm_control` function. test_local_stats: Boolean indicating whether to use local batch statistics in test mode. See the `BatchNorm` documentation for more on this. Returns: A tuple (output, next_state) where \'output\' is a Tensor of size `[batch_size, hidden_size]` and \'next_state\' is a tuple (next_hidden, next_cell) or (next_hidden, next_cell, time_step + 1), where next_hidden and next_cell have size `[batch_size, hidden_size]`. Raises: ValueError: If connecting the module into the graph any time after the first time, and the inferred size of the inputs does not match previous invocations.'
def _build(self, inputs, prev_state, is_training=None, test_local_stats=True):
use_batch_norm = self._use_batch_norm_c or self._use_batch_norm_h
use_batch_norm = use_batch_norm or self._use_batch_norm_x
if use_batch_norm and is_training is None:
  raise ValueError('Boolean is_training flag must be explicitly specified when using batch normalization.')
if self._max_unique_stats == 1:
  prev_hidden, prev_cell = prev_state
  time_step = None
else:
  prev_hidden, prev_cell, time_step = prev_state
if self._hidden_clip_value is not None:
  prev_hidden = tf.clip_by_value(prev_hidden, -self._hidden_clip_value,
                                 self._hidden_clip_value)
if self._cell_clip_value is not None:
  prev_cell = tf.clip_by_value(prev_cell, -self._cell_clip_value,
                               self._cell_clip_value)
self._create_gate_variables(inputs.get_shape(), inputs.dtype)
self._create_batch_norm_variables(inputs.dtype)
if self._use_batch_norm_h or self._use_batch_norm_x:
  gates_h = tf.matmul(prev_hidden, self._w_h)
  gates_x = tf.matmul(inputs, self._w_x)
  if self._use_batch_norm_h:
    gates_h = self._gamma_h * self._batch_norm_h(gates_h, time_step, is_training,
                                                 test_local_stats)
  if self._use_batch_norm_x:
    gates_x = self._gamma_x * self._batch_norm_x(gates_x, time_step, is_training,
                                                 test_local_stats)
  gates = gates_h + gates_x
else:
  inputs_and_hidden = tf.concat([inputs, prev_hidden], 1)
  gates = tf.matmul(inputs_and_hidden, self._w_xh)
  if self._use_layer_norm:
    gates = layer_norm.LayerNorm()(gates)
gates += self._b
i, j, f, o = array_ops.split(value=gates, num_or_size_splits=4, axis=1)
if self._use_peepholes:
  self._create_peephole_variables(inputs.dtype)
  f += self._w_f_diag * prev_cell
  i += self._w_i_diag * prev_cell
forget_mask = tf.sigmoid(f + self._forget_bias)
next_cell = forget_mask * prev_cell + tf.sigmoid(i) * tf.tanh(j)
cell_output = next_cell
if self._use_batch_norm_c:
  cell_output = self._beta_c + self._gamma_c * self._batch_norm_c(
      cell_output, time_step, is_training, test_local_stats)
if self._use_peepholes:
  cell_output += self._w_o_diag * cell_output
next_hidden = tf.tanh(cell_output) * tf.sigmoid(o)
if self._max_unique_stats == 1:
  return next_hidden, (next_hidden, next_cell)
else:
  return next_hidden, (next_hidden, next_cell, time_step + 1)
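Reading the gate arithmetic above as equations (a restatement of the code, with W and b the concatenated gate weights and bias, sigma the logistic sigmoid, and the elementwise product written as \odot; batch/layer norm and peepholes omitted for brevity):

\begin{aligned}
[i,\; j,\; f,\; o] &= [x_t,\; h_{t-1}]\, W + b \\
c_t &= \sigma(f + b_{\text{forget}}) \odot c_{t-1} + \sigma(i) \odot \tanh(j) \\
h_t &= \tanh(c_t) \odot \sigma(o)
\end{aligned}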
'Initialize the variables used for the `BatchNorm`s (if any).'
def _create_batch_norm_variables(self, dtype):
gamma_initializer = tf.constant_initializer(0.1)
if self._use_batch_norm_h:
  self._gamma_h = tf.get_variable(
      self.GAMMA_H, shape=[4 * self._hidden_size], dtype=dtype,
      initializer=self._initializers.get(self.GAMMA_H, gamma_initializer),
      partitioner=self._partitioners.get(self.GAMMA_H),
      regularizer=self._regularizers.get(self.GAMMA_H))
if self._use_batch_norm_x:
  self._gamma_x = tf.get_variable(
      self.GAMMA_X, shape=[4 * self._hidden_size], dtype=dtype,
      initializer=self._initializers.get(self.GAMMA_X, gamma_initializer),
      partitioner=self._partitioners.get(self.GAMMA_X),
      regularizer=self._regularizers.get(self.GAMMA_X))
if self._use_batch_norm_c:
  self._gamma_c = tf.get_variable(
      self.GAMMA_C, shape=[self._hidden_size], dtype=dtype,
      initializer=self._initializers.get(self.GAMMA_C, gamma_initializer),
      partitioner=self._partitioners.get(self.GAMMA_C),
      regularizer=self._regularizers.get(self.GAMMA_C))
  self._beta_c = tf.get_variable(
      self.BETA_C, shape=[self._hidden_size], dtype=dtype,
      initializer=self._initializers.get(self.BETA_C),
      partitioner=self._partitioners.get(self.BETA_C),
      regularizer=self._regularizers.get(self.BETA_C))
'Initialize the variables used for the gates.'
def _create_gate_variables(self, input_shape, dtype):
if len(input_shape) != 2:
  raise ValueError('Rank of shape must be {} not: {}'.format(2, len(input_shape)))
input_size = input_shape.dims[1].value
b_shape = [4 * self._hidden_size]
equiv_input_size = self._hidden_size + input_size
initializer = basic.create_linear_initializer(equiv_input_size)
if self._use_batch_norm_h or self._use_batch_norm_x:
  self._w_h = tf.get_variable(
      self.W_GATES + '_H',
      shape=[self._hidden_size, 4 * self._hidden_size], dtype=dtype,
      initializer=self._initializers.get(self.W_GATES, initializer),
      partitioner=self._partitioners.get(self.W_GATES),
      regularizer=self._regularizers.get(self.W_GATES))
  self._w_x = tf.get_variable(
      self.W_GATES + '_X',
      shape=[input_size, 4 * self._hidden_size], dtype=dtype,
      initializer=self._initializers.get(self.W_GATES, initializer),
      partitioner=self._partitioners.get(self.W_GATES),
      regularizer=self._regularizers.get(self.W_GATES))
else:
  self._w_xh = tf.get_variable(
      self.W_GATES,
      shape=[self._hidden_size + input_size, 4 * self._hidden_size], dtype=dtype,
      initializer=self._initializers.get(self.W_GATES, initializer),
      partitioner=self._partitioners.get(self.W_GATES),
      regularizer=self._regularizers.get(self.W_GATES))
self._b = tf.get_variable(
    self.B_GATES, shape=b_shape, dtype=dtype,
    initializer=self._initializers.get(self.B_GATES, initializer),
    partitioner=self._partitioners.get(self.B_GATES),
    regularizer=self._regularizers.get(self.B_GATES))
'Initialize the variables used for the peephole connections.'
def _create_peephole_variables(self, dtype):
self._w_f_diag = tf.get_variable(
    self.W_F_DIAG, shape=[self._hidden_size], dtype=dtype,
    initializer=self._initializers.get(self.W_F_DIAG),
    partitioner=self._partitioners.get(self.W_F_DIAG),
    regularizer=self._regularizers.get(self.W_F_DIAG))
self._w_i_diag = tf.get_variable(
    self.W_I_DIAG, shape=[self._hidden_size], dtype=dtype,
    initializer=self._initializers.get(self.W_I_DIAG),
    partitioner=self._partitioners.get(self.W_I_DIAG),
    regularizer=self._regularizers.get(self.W_I_DIAG))
self._w_o_diag = tf.get_variable(
    self.W_O_DIAG, shape=[self._hidden_size], dtype=dtype,
    initializer=self._initializers.get(self.W_O_DIAG),
    partitioner=self._partitioners.get(self.W_O_DIAG),
    regularizer=self._regularizers.get(self.W_O_DIAG))
'Builds the default start state tensor of zeros. Args: batch_size: An int, float or scalar Tensor representing the batch size. dtype: The data type to use for the state. trainable: Boolean that indicates whether to learn the initial state. trainable_initializers: An optional pair of initializers for the initial hidden state and cell state. trainable_regularizers: Optional regularizer function or nested structure of functions with the same structure as the `state_size` property of the core, to be used as regularizers of the initial state variable. A regularizer should be a function that takes a single `Tensor` as an input and returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in `tf.contrib.layers`. name: Optional string used to prefix the initial state variable names, in the case of a trainable initial state. If not provided, defaults to the name of the module. Returns: A tensor tuple `([batch_size, state_size], [batch_size, state_size], ?)` filled with zeros, with the third entry present when batch norm is enabled with `max_unique_stats > 1`, with value `0` (representing the time step).'
def initial_state(self, batch_size, dtype=tf.float32, trainable=False, trainable_initializers=None, trainable_regularizers=None, name=None):
if self._max_unique_stats == 1:
  return super(_BaseLSTM, self).initial_state(
      batch_size, dtype=dtype, trainable=trainable,
      trainable_initializers=trainable_initializers,
      trainable_regularizers=trainable_regularizers, name=name)
else:
  with tf.name_scope(self._initial_state_scope(name)):
    if not trainable:
      state = self.zero_state(batch_size, dtype)
    else:
      state = rnn_core.trainable_initial_state(
          batch_size,
          (tf.TensorShape([self._hidden_size]),
           tf.TensorShape([self._hidden_size])),
          dtype=dtype,
          initializers=trainable_initializers,
          regularizers=trainable_regularizers,
          name=self._initial_state_scope(name))
    return (state[0], state[1], tf.constant(0, dtype=tf.int32))
'Tuple of `tf.TensorShape`s indicating the size of state tensors.'
@property
def state_size(self):
if self._max_unique_stats == 1:
  return (tf.TensorShape([self._hidden_size]), tf.TensorShape([self._hidden_size]))
else:
  return (tf.TensorShape([self._hidden_size]), tf.TensorShape([self._hidden_size]),
          tf.TensorShape(1))
'`tf.TensorShape` indicating the size of the core output.'
@property
def output_size(self):
return tf.TensorShape([self._hidden_size])
'Boolean indicating whether peephole connections are used.'
@property
def use_peepholes(self):
return self._use_peepholes
'Boolean indicating whether batch norm for hidden -> gates is enabled.'
@property
def use_batch_norm_h(self):
return self._use_batch_norm_h
'Boolean indicating whether batch norm for input -> gates is enabled.'
@property
def use_batch_norm_x(self):
return self._use_batch_norm_x
'Boolean indicating whether batch norm for cell -> output is enabled.'
@property
def use_batch_norm_c(self):
return self._use_batch_norm_c
'Boolean indicating whether layer norm is enabled.'
@property
def use_layer_norm(self):
return self._use_layer_norm
'Create an IndexedStatsBatchNorm. Args: max_unique_stats: number of different indices to have statistics for; indices beyond this will use the final statistics. name: Name of the module.'
def __init__(self, max_unique_stats, name=None):
super(_BaseLSTM.IndexedStatsBatchNorm, self).__init__(name=name)
self._max_unique_stats = max_unique_stats
'Add the IndexedStatsBatchNorm module to the graph. Args: inputs: Tensor to apply batch norm to. index: Scalar TensorFlow int32 value to select the batch norm index. is_training: Boolean to indicate to `snt.BatchNorm` if we are currently training. test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch normalization should use local batch statistics at test time. Returns: Output of batch norm operation.'
def _build(self, inputs, index, is_training, test_local_stats):
def create_batch_norm():
  return batch_norm.BatchNorm(offset=False, scale=False)(inputs, is_training,
                                                         test_local_stats)

if self._max_unique_stats > 1:
  pred_fn_pairs = [(tf.equal(i, index), create_batch_norm)
                   for i in xrange(self._max_unique_stats - 1)]
  out = tf.case(pred_fn_pairs, create_batch_norm)
  out.set_shape(inputs.get_shape())
  return out
else:
  return create_batch_norm()
'Construct the CellWithExtraInput. Args: cell: The RNNCell to wrap (typically a snt.RNNCore). *args: Extra arguments to pass to __call__. **kwargs: Extra keyword arguments to pass to __call__.'
def __init__(self, cell, *args, **kwargs):
self._cell = cell
self._args = args
self._kwargs = kwargs
'Tuple indicating the size of nested state tensors.'
@property
def state_size(self):
return self._cell.state_size
'`tf.TensorShape` indicating the size of the core output.'
@property
def output_size(self):
return self._cell.output_size
'Construct LSTM. Args: hidden_size: (int) Hidden size dimensionality. forget_bias: (float) Bias for the forget activation. initializers: Dict containing ops to initialize the weights. This dictionary may contain any of the keys returned by `LSTM.get_possible_initializer_keys`. The gamma and beta variables control batch normalization values for different batch norm transformations inside the cell; see the paper for details. partitioners: Optional dict containing partitioners to partition the weights and biases. As a default, no partitioners are used. This dict may contain any of the keys returned by `LSTM.get_possible_initializer_keys`. regularizers: Optional dict containing regularizers for the weights and biases. As a default, no regularizers are used. This dict may contain any of the keys returned by `LSTM.get_possible_initializer_keys`. use_peepholes: Boolean that indicates whether peephole connections are used. use_batch_norm_h: Boolean that indicates whether to apply batch normalization at the previous_hidden -> gates contribution. This is deprecated and will be removed in a future sonnet version. Please switch to `BatchNormLSTM` if you require it. use_batch_norm_x: Boolean that indicates whether to apply batch normalization at the input -> gates contribution. This is deprecated and will be removed in a future sonnet version. Please switch to `BatchNormLSTM` if you require it. use_batch_norm_c: Boolean that indicates whether to apply batch normalization at the cell -> output contribution. This is deprecated and will be removed in a future sonnet version. Please switch to `BatchNormLSTM` if you require it. use_layer_norm: Boolean that indicates whether to apply layer normalization. max_unique_stats: The maximum number of steps to use unique batch norm statistics for. (See module description above for more details.) This is deprecated and will be removed in a future sonnet version. Please switch to `BatchNormLSTM` if you require it. hidden_clip_value: Optional number; if set, then the LSTM hidden state vector is clipped by this value. cell_clip_value: Optional number; if set, then the LSTM cell vector is clipped by this value. name: name of the module. Raises: KeyError: if `initializers` contains any keys not returned by `LSTM.get_possible_initializer_keys`. KeyError: if `partitioners` contains any keys not returned by `LSTM.get_possible_initializer_keys`. KeyError: if `regularizers` contains any keys not returned by `LSTM.get_possible_initializer_keys`. ValueError: if a peephole initializer is passed in the initializer list, but `use_peepholes` is False.'
@deprecation.deprecated_args(
    '2017-09-18',
    'Please switch from LSTM to BatchNormLSTM if you need batch norm functionality.',
    'use_batch_norm_h', 'use_batch_norm_x', 'use_batch_norm_c', 'max_unique_stats')
def __init__(self, hidden_size, forget_bias=1.0, initializers=None,
             partitioners=None, regularizers=None, use_peepholes=False,
             use_batch_norm_h=False, use_batch_norm_x=False, use_batch_norm_c=False,
             use_layer_norm=False, max_unique_stats=1, hidden_clip_value=None,
             cell_clip_value=None, name='lstm'):
super(LSTM, self).__init__(hidden_size, forget_bias=forget_bias, initializers=initializers, partitioners=partitioners, regularizers=regularizers, use_peepholes=use_peepholes, use_batch_norm_h=use_batch_norm_h, use_batch_norm_x=use_batch_norm_x, use_batch_norm_c=use_batch_norm_c, use_layer_norm=use_layer_norm, max_unique_stats=max_unique_stats, hidden_clip_value=hidden_clip_value, cell_clip_value=cell_clip_value, name=name)
'Construct `BatchNormLSTM`. Args: hidden_size: (int) Hidden size dimensionality. forget_bias: (float) Bias for the forget activation. initializers: Dict containing ops to initialize the weights. This dictionary may contain any of the keys returned by `BatchNormLSTM.get_possible_initializer_keys`. The gamma and beta variables control batch normalization values for different batch norm transformations inside the cell; see the paper for details. partitioners: Optional dict containing partitioners to partition the weights and biases. As a default, no partitioners are used. This dict may contain any of the keys returned by `BatchNormLSTM.get_possible_initializer_keys`. regularizers: Optional dict containing regularizers for the weights and biases. As a default, no regularizers are used. This dict may contain any of the keys returned by `BatchNormLSTM.get_possible_initializer_keys`. use_peepholes: Boolean that indicates whether peephole connections are used. use_batch_norm_h: Boolean that indicates whether to apply batch normalization at the previous_hidden -> gates contribution. If you are experimenting with batch norm then this may be the most effective to use, and is enabled by default. use_batch_norm_x: Boolean that indicates whether to apply batch normalization at the input -> gates contribution. use_batch_norm_c: Boolean that indicates whether to apply batch normalization at the cell -> output contribution. max_unique_stats: The maximum number of steps to use unique batch norm statistics for. (See module description above for more details.) hidden_clip_value: Optional number; if set, then the LSTM hidden state vector is clipped by this value. cell_clip_value: Optional number; if set, then the LSTM cell vector is clipped by this value. name: name of the module. Raises: KeyError: if `initializers` contains any keys not returned by `BatchNormLSTM.get_possible_initializer_keys`. KeyError: if `partitioners` contains any keys not returned by `BatchNormLSTM.get_possible_initializer_keys`. KeyError: if `regularizers` contains any keys not returned by `BatchNormLSTM.get_possible_initializer_keys`. ValueError: if a peephole initializer is passed in the initializer list, but `use_peepholes` is False. ValueError: if a batch norm initializer is passed in the initializer list, but batch norm is disabled. ValueError: if none of the `use_batch_norm_*` options are True. ValueError: if `max_unique_stats` is < 1.'
def __init__(self, hidden_size, forget_bias=1.0, initializers=None, partitioners=None, regularizers=None, use_peepholes=False, use_batch_norm_h=True, use_batch_norm_x=False, use_batch_norm_c=False, max_unique_stats=1, hidden_clip_value=None, cell_clip_value=None, name='batch_norm_lstm'):
if not any([use_batch_norm_h, use_batch_norm_x, use_batch_norm_c]):
  raise ValueError('At least one use_batch_norm_* option is required for BatchNormLSTM')
super(BatchNormLSTM, self).__init__(
    hidden_size,
    forget_bias=forget_bias,
    initializers=initializers,
    partitioners=partitioners,
    regularizers=regularizers,
    use_peepholes=use_peepholes,
    use_batch_norm_h=use_batch_norm_h,
    use_batch_norm_x=use_batch_norm_x,
    use_batch_norm_c=use_batch_norm_c,
    max_unique_stats=max_unique_stats,
    hidden_clip_value=hidden_clip_value,
    cell_clip_value=cell_clip_value,
    name=name)
'Construct ConvLSTM. Args: conv_ndims: Convolution dimensionality (1, 2 or 3). input_shape: Shape of the input as tuple, excluding the batch size. output_channels: Number of output channels of the conv LSTM. kernel_shape: Sequence of kernel sizes (of size 2), or integer that is used to define kernel size in all dimensions. stride: Sequence of kernel strides (of size 2), or integer that is used to define stride in all dimensions. padding: Padding algorithm, either `snt.SAME` or `snt.VALID`. use_bias: Use bias in convolutions. skip_connection: If set to `True`, concatenate the input to the output of the conv LSTM. Default: `False`. forget_bias: Forget bias. initializers: Dict containing ops to initialize the convolutional weights. partitioners: Optional dict containing partitioners to partition the convolutional weights and biases. As a default, no partitioners are used. regularizers: Optional dict containing regularizers for the convolutional weights and biases. As a default, no regularizers are used. name: Name of the module. Raises: ValueError: If `skip_connection` is `True` and stride is different from 1 or if `input_shape` is incompatible with `conv_ndims`.'
def __init__(self, conv_ndims, input_shape, output_channels, kernel_shape, stride=1, padding=conv.SAME, use_bias=True, skip_connection=False, forget_bias=1.0, initializers=None, partitioners=None, regularizers=None, name='conv_lstm'):
super(ConvLSTM, self).__init__(name=name)
self._conv_class = self._get_conv_class(conv_ndims)
if skip_connection and stride != 1:
  raise ValueError('`stride` needs to be 1 when using skip connection')
if conv_ndims != len(input_shape) - 1:
  raise ValueError('Invalid input_shape {} for conv_ndims={}.'.format(
      input_shape, conv_ndims))
self._conv_ndims = conv_ndims
self._input_shape = input_shape
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._stride = stride
self._padding = padding
self._use_bias = use_bias
self._forget_bias = forget_bias
self._skip_connection = skip_connection
self._initializers = initializers
self._partitioners = partitioners
self._regularizers = regularizers
self._total_output_channels = output_channels
if self._stride != 1:
  self._total_output_channels //= self._stride * self._stride
if self._skip_connection:
  self._total_output_channels += self._input_shape[-1]
self._convolutions = collections.defaultdict(self._new_convolution)
'Tuple of `tf.TensorShape`s indicating the size of state tensors.'
@property
def state_size(self):
hidden_size = tf.TensorShape(self._input_shape[:-1] + (self._output_channels,))
return (hidden_size, hidden_size)
'`tf.TensorShape` indicating the size of the core output.'
@property
def output_size(self):
return tf.TensorShape((self._input_shape[:(-1)] + (self._total_output_channels,)))
'Construct Conv1DLSTM. See `snt.ConvLSTM` for more details.'
def __init__(self, name='conv_1d_lstm', **kwargs):
super(Conv1DLSTM, self).__init__(conv_ndims=1, name=name, **kwargs)
'Construct Conv2DLSTM. See `snt.ConvLSTM` for more details.'
def __init__(self, name='conv_2d_lstm', **kwargs):
super(Conv2DLSTM, self).__init__(conv_ndims=2, name=name, **kwargs)
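A hedged usage sketch for the convolutional cells above (Sonnet 1.x-style API assumed; shapes and names are illustrative):

import tensorflow as tf
import sonnet as snt

# A batch of 4 videos, 10 frames each, of 32x32 RGB images (illustrative sizes).
frames = tf.placeholder(tf.float32, [4, 10, 32, 32, 3])

cell = snt.Conv2DLSTM(
    input_shape=(32, 32, 3),   # per-step input shape, excluding the batch size
    output_channels=16,
    kernel_shape=3)

# The hidden and cell states are themselves [32, 32, 16] feature maps.
outputs, state = tf.nn.dynamic_rnn(cell, frames, dtype=tf.float32)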
'Construct GRU. Args: hidden_size: (int) Hidden size dimensionality. initializers: Dict containing ops to initialize the weights. This dict may contain any of the keys returned by `GRU.get_possible_initializer_keys`. partitioners: Optional dict containing partitioners to partition the weights and biases. As a default, no partitioners are used. This dict may contain any of the keys returned by `GRU.get_possible_initializer_keys` regularizers: Optional dict containing regularizers for the weights and biases. As a default, no regularizers are used. This dict may contain any of the keys returned by `GRU.get_possible_initializer_keys` name: name of the module. Raises: KeyError: if `initializers` contains any keys not returned by `GRU.get_possible_initializer_keys`. KeyError: if `partitioners` contains any keys not returned by `GRU.get_possible_initializer_keys`. KeyError: if `regularizers` contains any keys not returned by `GRU.get_possible_initializer_keys`.'
def __init__(self, hidden_size, initializers=None, partitioners=None, regularizers=None, name='gru'):
super(GRU, self).__init__(name=name)
self._hidden_size = hidden_size
self._initializers = util.check_initializers(initializers,
                                             self.POSSIBLE_INITIALIZER_KEYS)
self._partitioners = util.check_partitioners(partitioners,
                                             self.POSSIBLE_INITIALIZER_KEYS)
self._regularizers = util.check_regularizers(regularizers,
                                             self.POSSIBLE_INITIALIZER_KEYS)
'Returns the keys the dictionary of variable initializers may contain.

The set of all possible initializer keys are:
  wz: weight for input -> update cell
  uz: weight for prev_state -> update cell
  bz: bias for update_cell
  wr: weight for input -> reset cell
  ur: weight for prev_state -> reset cell
  br: bias for reset cell
  wh: weight for input -> candidate activation
  uh: weight for prev_state -> candidate activation
  bh: bias for candidate activation

Returns:
  Set with strings corresponding to the strings that may be passed to the constructor.'
@classmethod
def get_possible_initializer_keys(cls):
return super(GRU, cls).get_possible_initializer_keys(cls)
'Connects the GRU module into the graph. If this is not the first time the module has been connected to the graph, the Tensors provided as inputs and state must have the same final dimension, in order for the existing variables to be the correct size for their corresponding multiplications. The batch size may differ for each connection. Args: inputs: Tensor of size `[batch_size, input_size]`. prev_state: Tensor of size `[batch_size, hidden_size]`. Returns: A tuple (output, next_state) where `output` is a Tensor of size `[batch_size, hidden_size]` and `next_state` is a Tensor of size `[batch_size, hidden_size]`. Raises: ValueError: If connecting the module into the graph any time after the first time, and the inferred size of the inputs does not match previous invocations.'
def _build(self, inputs, prev_state):
input_size = inputs.get_shape()[1]
weight_shape = (input_size, self._hidden_size)
u_shape = (self._hidden_size, self._hidden_size)
bias_shape = (self._hidden_size,)
self._wz = tf.get_variable(GRU.WZ, weight_shape, dtype=inputs.dtype,
                           initializer=self._initializers.get(GRU.WZ),
                           partitioner=self._partitioners.get(GRU.WZ),
                           regularizer=self._regularizers.get(GRU.WZ))
self._uz = tf.get_variable(GRU.UZ, u_shape, dtype=inputs.dtype,
                           initializer=self._initializers.get(GRU.UZ),
                           partitioner=self._partitioners.get(GRU.UZ),
                           regularizer=self._regularizers.get(GRU.UZ))
self._bz = tf.get_variable(GRU.BZ, bias_shape, dtype=inputs.dtype,
                           initializer=self._initializers.get(GRU.BZ),
                           partitioner=self._partitioners.get(GRU.BZ),
                           regularizer=self._regularizers.get(GRU.BZ))
z = tf.sigmoid(tf.matmul(inputs, self._wz) +
               tf.matmul(prev_state, self._uz) + self._bz)
self._wr = tf.get_variable(GRU.WR, weight_shape, dtype=inputs.dtype,
                           initializer=self._initializers.get(GRU.WR),
                           partitioner=self._partitioners.get(GRU.WR),
                           regularizer=self._regularizers.get(GRU.WR))
self._ur = tf.get_variable(GRU.UR, u_shape, dtype=inputs.dtype,
                           initializer=self._initializers.get(GRU.UR),
                           partitioner=self._partitioners.get(GRU.UR),
                           regularizer=self._regularizers.get(GRU.UR))
self._br = tf.get_variable(GRU.BR, bias_shape, dtype=inputs.dtype,
                           initializer=self._initializers.get(GRU.BR),
                           partitioner=self._partitioners.get(GRU.BR),
                           regularizer=self._regularizers.get(GRU.BR))
r = tf.sigmoid(tf.matmul(inputs, self._wr) +
               tf.matmul(prev_state, self._ur) + self._br)
self._wh = tf.get_variable(GRU.WH, weight_shape, dtype=inputs.dtype,
                           initializer=self._initializers.get(GRU.WH),
                           partitioner=self._partitioners.get(GRU.WH),
                           regularizer=self._regularizers.get(GRU.WH))
self._uh = tf.get_variable(GRU.UH, u_shape, dtype=inputs.dtype,
                           initializer=self._initializers.get(GRU.UH),
                           partitioner=self._partitioners.get(GRU.UH),
                           regularizer=self._regularizers.get(GRU.UH))
self._bh = tf.get_variable(GRU.BH, bias_shape, dtype=inputs.dtype,
                           initializer=self._initializers.get(GRU.BH),
                           partitioner=self._partitioners.get(GRU.BH),
                           regularizer=self._regularizers.get(GRU.BH))
h_twiddle = tf.tanh(tf.matmul(inputs, self._wh) +
                    tf.matmul(r * prev_state, self._uh) + self._bh)
state = (1 - z) * prev_state + z * h_twiddle
return state, state
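As equations, the body above computes the standard GRU update (a restatement of the code; sigma is the logistic sigmoid and \odot the elementwise product):

\begin{aligned}
z_t &= \sigma(x_t W_z + h_{t-1} U_z + b_z) \\
r_t &= \sigma(x_t W_r + h_{t-1} U_r + b_r) \\
\tilde{h}_t &= \tanh\!\big(x_t W_h + (r_t \odot h_{t-1}) U_h + b_h\big) \\
h_t &= (1 - z_t) \odot h_{t-1} + z_t \odot \tilde{h}_t
\end{aligned}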
'Initialize ConstantZero module. Args: output_rank: int. Rank of value returned by build(). The default value (2) imitates the output of the Linear module. name: string. Name of module.'
def __init__(self, output_rank=2, name='constant_zero'):
super(ConstantZero, self).__init__(name=name)
self._output_rank = output_rank
'Attach ConstantZero module to graph. Args: inputs: [batch_size, input_size]-shaped Tensor of dtype float32. Returns: A Tensor with rank output_rank where the first dimension has length batch_size and all others have length 1.'
def _build(self, inputs):
assert inputs.get_shape().as_list()[-1] is not None
batch_size = tf.shape(inputs)[0]
result_shape = [batch_size] + [1] * (self._output_rank - 1)
return tf.zeros(result_shape, dtype=inputs.dtype)
'Constructs a GridWarper module and initializes the source grid params. `source_shape` and `output_shape` are used to define the size of the source and output signal domains, as opposed to the shape of the respective Tensors. For example, for an image of size `width=W` and `height=H`, `{source,output}_shape=[H, W]`; for a volume of size `width=W`, `height=H` and `depth=D`, `{source,output}_shape=[H, W, D]`. Args: source_shape: Iterable of integers determining the size of the source signal domain. output_shape: Iterable of integers determining the size of the destination resampled signal domain. num_coeff: Number of coefficients parametrizing the grid warp. For example, a 2D affine transformation will be defined by the 6 parameters populating the corresponding 2x3 affine matrix. name: Name of Module. **kwargs: Extra kwargs to be forwarded to the `create_features` function, instantiating the source grid parameters. Raises: Error: If `len(output_shape) > len(source_shape)`. TypeError: If `output_shape` and `source_shape` are not both iterable.'
def __init__(self, source_shape, output_shape, num_coeff, name, **kwargs):
super(GridWarper, self).__init__(name=name)
self._source_shape = tuple(source_shape)
self._output_shape = tuple(output_shape)
if len(self._output_shape) > len(self._source_shape):
  raise base.Error('Output domain dimensionality ({}) must be equal or smaller than source domain dimensionality ({})'.format(len(self._output_shape), len(self._source_shape)))
self._num_coeff = num_coeff
self._psi = self._create_features(**kwargs)
'Generates matrix of features, of size `[num_coeff, num_points]`.'
@abc.abstractmethod
def _create_features(self, **kwargs):
pass
'Returns number of coefficients of warping function.'
@property
def n_coeff(self):
return self._n_coeff
'Returns a list of features used to compute the grid warp.'
@property
def psi(self):
return self._psi
'Returns a tuple containing the shape of the source signal.'
@property
def source_shape(self):
return self._source_shape
'Returns a tuple containing the shape of the output grid.'
@property
def output_shape(self):
return self._output_shape
'Constructs an AffineGridWarper. `source_shape` and `output_shape` are used to define the size of the source and output signal domains, as opposed to the shape of the respective Tensors. For example, for an image of size `width=W` and `height=H`, `{source,output}_shape=[H, W]`; for a volume of size `width=W`, `height=H` and `depth=D`, `{source,output}_shape=[H, W, D]`. Args: source_shape: Iterable of integers determining the size of the source signal domain. output_shape: Iterable of integers determining the size of the destination resampled signal domain. constraints: Either a double list of shape `[N, N+1]` defining constraints on the entries of a matrix defining an affine transformation in N dimensions, or an `AffineWarpConstraints` object. If the double list is passed, a numeric value bakes in a constraint on the corresponding entry in the transformation matrix, whereas `None` implies that the corresponding entry will be specified at run time. name: Name of module. Raises: Error: If constraints fully define the affine transformation; or if input grid shape and constraints have different dimensionality. TypeError: If output_shape and source_shape are not both iterable.'
def __init__(self, source_shape, output_shape, constraints=None, name='affine_grid_warper'):
self._source_shape = tuple(source_shape)
self._output_shape = tuple(output_shape)
num_dim = len(source_shape)
if isinstance(constraints, AffineWarpConstraints):
  self._constraints = constraints
elif constraints is None:
  self._constraints = AffineWarpConstraints.no_constraints(num_dim)
else:
  self._constraints = AffineWarpConstraints(constraints=constraints)
if self._constraints.num_free_params == 0:
  raise base.Error('Transformation is fully constrained.')
if self._constraints.num_dim != num_dim:
  raise base.Error('Incompatible set of constraints provided: input grid shape and constraints have different dimensionality.')
super(AffineGridWarper, self).__init__(source_shape=source_shape,
                                       output_shape=output_shape,
                                       num_coeff=6,
                                       name=name,
                                       constraints=self._constraints)
'Creates all the matrices needed to compute the output warped grids.'
def _create_features(self, constraints):
affine_warp_constraints = constraints
if not isinstance(affine_warp_constraints, AffineWarpConstraints):
  affine_warp_constraints = AffineWarpConstraints(affine_warp_constraints)
mask = affine_warp_constraints.mask
psi = _create_affine_features(output_shape=self._output_shape,
                              source_shape=self._source_shape)
scales = [(x - 1.0) * 0.5 for x in reversed(self._source_shape)]
offsets = scales
features = []
for row, scale in zip(mask, scales):
  x_i = np.array([x for x, is_active in zip(psi, row) if is_active])
  features.append(x_i * scale if len(x_i) else None)
for row_i, row in enumerate(mask):
  x_i = None
  s = scales[row_i]
  for i, is_active in enumerate(row):
    if is_active:
      continue
    if x_i is None:
      x_i = np.array(psi[i]) * affine_warp_constraints[row_i][i] * s
    else:
      x_i += np.array(psi[i]) * affine_warp_constraints[row_i][i] * s
  features.append(x_i)
features += offsets
return features
'Assembles the module network and adds it to the graph. The internal computation graph is assembled according to the set of constraints provided at construction time. Args: inputs: Tensor containing a batch of transformation parameters. Returns: A batch of warped grids. Raises: Error: If the input tensor size is not consistent with the constraints passed at construction time.'
def _build(self, inputs):
input_shape = tf.shape(inputs)
input_dtype = inputs.dtype.as_numpy_dtype
batch_size = tf.expand_dims(input_shape[0], 0)
number_of_params = inputs.get_shape()[1]
if number_of_params != self._constraints.num_free_params:
  raise base.Error('Input size is not consistent with constraint definition: {} parameters expected, {} provided.'.format(self._constraints.num_free_params, number_of_params))
num_output_dimensions = len(self._psi) // 3

def get_input_slice(start, size):
  """Extracts a subset of columns from the input 2D Tensor."""
  return basic.SliceByDim([1], [start], [size])(inputs)

warped_grid = []
var_index_offset = 0
number_of_points = np.prod(self._output_shape)
for i in xrange(num_output_dimensions):
  if self._psi[i] is not None:
    # This output dimension depends on free parameters supplied at run time.
    grid_coord = self._psi[i].astype(input_dtype)
    num_active_vars = self._psi[i].shape[0]
    active_vars = get_input_slice(var_index_offset, num_active_vars)
    warped_coord = tf.matmul(active_vars, grid_coord)
    warped_coord = tf.expand_dims(warped_coord, 1)
    var_index_offset += num_active_vars
    offset = self._psi[num_output_dimensions + i]
    if offset is not None:
      offset = offset.astype(input_dtype)
      # Broadcast the constrained offset along the batch dimension.
      tiling_params = tf.concat(
          [batch_size, tf.constant(1, shape=(1,)), tf.ones_like(offset.shape)], 0)
      offset = offset.reshape((1, 1) + offset.shape)
      warped_coord += tf.tile(offset, tiling_params)
  else:
    # This output dimension is fully constrained; tile the constant coordinates.
    warped_coord = self._psi[num_output_dimensions + i].astype(input_dtype)
    tiling_params = tf.concat(
        [batch_size, tf.constant(1, shape=(1,)), tf.ones_like(warped_coord.shape)],
        0)
    warped_coord = warped_coord.reshape((1, 1) + warped_coord.shape)
    warped_coord = tf.tile(warped_coord, tiling_params)
  warped_coord += self._psi[i + 2 * num_output_dimensions]
  warped_coord.set_shape([None, 1, number_of_points])
  warped_grid.append(warped_coord)
grid_shape = self._output_shape + (1,)
warped_grid = [basic.BatchReshape(grid_shape)(grid) for grid in warped_grid]
return tf.concat(warped_grid, len(grid_shape))
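A hedged usage sketch of the warper (Sonnet 1.x-style API assumed; sizes are illustrative). For a 2D warp, the module consumes one row of affine parameters per batch element and emits a sampling grid over the output domain:

import tensorflow as tf
import sonnet as snt

# Warp a 64x64 source image domain onto a 32x32 output grid.
warper = snt.AffineGridWarper(source_shape=[64, 64], output_shape=[32, 32])

# One unconstrained 2x3 affine matrix per batch element, flattened as
# [a, b, tx, c, d, ty] (the layout described in the inverse() docstring below).
affine_params = tf.placeholder(tf.float32, [16, 6])

# grid has shape [16, 32, 32, 2]: a 2-vector of source coordinates per output
# pixel, which can then be fed to a resampler.
grid = warper(affine_params)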
'Returns a `sonnet` module to compute inverse affine transforms. The function first assembles a network that given the constraints of the current AffineGridWarper and a set of input parameters, retrieves the coefficients of the corresponding inverse affine transform, then feeds its output into a new AffineGridWarper setup to correctly warp the `output` space into the `source` space. Args: name: Name of module implementing the inverse grid transformation. Returns: A `sonnet` module performing the inverse affine transform of a reference grid of points via an AffineGridWarper module. Raises: tf.errors.UnimplementedError: If the function is called on a non 2D instance of AffineGridWarper.'
def inverse(self, name=None):
if self._num_coeff != 6:
  raise tf.errors.UnimplementedError('AffineGridWarper currently supports inversion only for the 2D case.')

def _affine_grid_warper_inverse(inputs):
  """Assembles network to compute inverse affine transformation.

  Each `inputs` row potentially contains [a, b, tx, c, d, ty]
  corresponding to an affine matrix:

    A = [a, b, tx],
        [c, d, ty]

  We want to generate a tensor containing the coefficients of the
  corresponding inverse affine transformation in a constraints-aware
  fashion.
  Calling M:

    M = [a, b]
        [c, d]

  the affine matrix for the inverse transform is:

    A_in = [M^(-1), M^(-1) * [-tx, -ty]^T]

  where

    M^(-1) = (ad - bc)^(-1) * [ d, -b]
                              [-c,  a]

  Args:
    inputs: Tensor containing a batch of transformation parameters.

  Returns:
    A tensorflow graph performing the inverse affine transformation
    parametrized by the input coefficients.
  """
  batch_size = tf.expand_dims(tf.shape(inputs)[0], 0)
  constant_shape = tf.concat([batch_size, tf.convert_to_tensor((1,))], 0)
  index = iter(range(6))

  def get_variable(constraint):
    if constraint is None:
      i = next(index)
      return inputs[:, i:i + 1]
    else:
      return tf.fill(constant_shape, tf.constant(constraint, dtype=inputs.dtype))

  constraints = chain.from_iterable(self.constraints)
  a, b, tx, c, d, ty = (get_variable(constr) for constr in constraints)
  det = a * d - b * c
  a_inv = d / det
  b_inv = -b / det
  c_inv = -c / det
  d_inv = a / det
  m_inv = basic.BatchReshape([2, 2])(tf.concat([a_inv, b_inv, c_inv, d_inv], 1))
  txy = tf.expand_dims(tf.concat([tx, ty], 1), 2)
  txy_inv = basic.BatchFlatten()(tf.matmul(m_inv, txy))
  tx_inv = txy_inv[:, 0:1]
  ty_inv = txy_inv[:, 1:2]
  inverse_gw_inputs = tf.concat(
      [a_inv, b_inv, -tx_inv, c_inv, d_inv, -ty_inv], 1)
  agw = AffineGridWarper(self.output_shape, self.source_shape)
  return agw(inverse_gw_inputs)

if name is None:
  name = self.module_name + '_inverse'
return base.Module(_affine_grid_warper_inverse, name=name)
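The inverse parameters assembled by _affine_grid_warper_inverse are the closed-form inverse of a 2D affine map (restating the docstring and code above), with M the 2x2 linear part and t = [tx, ty]^T the translation:

A = \begin{bmatrix} M & t \end{bmatrix},\qquad
M^{-1} = \frac{1}{ad - bc}\begin{bmatrix} d & -b \\ -c & a \end{bmatrix},\qquad
A^{-1} = \begin{bmatrix} M^{-1} & -M^{-1} t \end{bmatrix}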
'Creates a constraint definition for an affine transformation. Args: constraints: A doubly-nested iterable of shape `[N, N+1]` defining constraints on the entries of a matrix that represents an affine transformation in `N` dimensions. A numeric value bakes in a constraint on the corresponding entry in the transformation matrix, whereas `None` implies that the corresponding entry will be specified at run time. Raises: TypeError: If `constraints` is not a nested iterable. ValueError: If the double iterable `constraints` has inconsistent dimensions.'
def __init__(self, constraints=((((None,) * 3),) * 2)):
try: self._constraints = tuple((tuple(x) for x in constraints)) except TypeError: raise TypeError('constraints must be a nested iterable.') self._num_dim = len(self._constraints) expected_num_cols = (self._num_dim + 1) if any(((len(x) != expected_num_cols) for x in self._constraints)): raise ValueError('The input list must define an Nx(N+1) matrix of constraints.')
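A brief construction sketch (the snt.AffineWarpConstraints export name and the use of `num_free_params` are assumptions; the values are illustrative):
import sonnet as snt

# Constrain a 2D transform to a pure translation: the 2x2 linear part is
# fixed to the identity, leaving only tx and ty as free parameters.
constraints = snt.AffineWarpConstraints([[1, 0, None],
                                         [0, 1, None]])
# constraints.num_free_params should be 2, so a grid warper built with
# these constraints expects inputs of shape [batch_size, 2].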
'Computes a boolean mask from the user defined constraints.'
def _calc_mask(self):
mask = [] for row in self._constraints: mask.append(tuple(((x is None) for x in row))) return tuple(mask)
'Computes number of non constrained parameters.'
def _calc_num_free_params(self):
return sum((row.count(None) for row in self._constraints))
'Returns the list of constraints for the i-th row of the affine matrix.'
def __getitem__(self, i):
return self._constraints[i]
'Combines two constraints, raising an error if they are not compatible.'
def _combine(self, x, y):
if ((x is None) or (y is None)): return (x or y) if (x != y): raise ValueError('Incompatible set of constraints provided.') return x
'Combines two sets of constraints into a coherent single set.'
def __and__(self, rhs):
return self.combine_with(rhs)
'Combines two sets of constraints into a coherent single set.'
def combine_with(self, additional_constraints):
x = additional_constraints if (not isinstance(additional_constraints, AffineWarpConstraints)): x = AffineWarpConstraints(additional_constraints) new_constraints = [] for (left, right) in zip(self._constraints, x.constraints): new_constraints.append([self._combine(x, y) for (x, y) in zip(left, right)]) return AffineWarpConstraints(new_constraints)
'Empty set of constraints for a num_dim-dimensional affine transform.'
@classmethod def no_constraints(cls, num_dim=2):
return cls(((((None,) * (num_dim + 1)),) * num_dim))
'Assigns constraints on translation components of affine transform in 2d.'
@classmethod def translation_2d(cls, x=None, y=None):
return cls([[None, None, x], [None, None, y]])
'Assigns constraints on translation components of affine transform in 3d.'
@classmethod def translation_3d(cls, x=None, y=None, z=None):
return cls([[None, None, None, x], [None, None, None, y], [None, None, None, z]])
'Assigns constraints on scaling components of affine transform in 2d.'
@classmethod def scale_2d(cls, x=None, y=None):
return cls([[x, None, None], [None, y, None]])
'Assigns constraints on scaling components of affine transform in 3d.'
@classmethod def scale_3d(cls, x=None, y=None, z=None):
return cls([[x, None, None, None], [None, y, None, None], [None, None, z, None]])
'Assigns constraints on shear components of affine transform in 2d.'
@classmethod def shear_2d(cls, x=None, y=None):
return cls([[None, x, None], [y, None, None]])
'Assigns constraints that disallow shear components of affine transform in 3d.'
@classmethod def no_shear_3d(cls):
return cls([[None, 0, 0, None], [0, None, 0, None], [0, 0, None, None]])
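The factory classmethods above compose via `&` (i.e. `combine_with`); a sketch under the same naming assumptions, with illustrative values:
import sonnet as snt

# Fix the x translation to 0 and the y scaling to 2; all other entries of
# the 2D affine matrix remain free (4 free parameters in total).
constraints = (snt.AffineWarpConstraints.translation_2d(x=0) &
               snt.AffineWarpConstraints.scale_2d(y=2))
warper = snt.AffineGridWarper(source_shape=(10, 10), output_shape=(6, 6),
                              constraints=constraints)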
'Defines the name scope of the initial_state ops.'
def _initial_state_scope(self, name):
return (name if name else ('%s_initial_state' % self.scope_name))
'Builds the default start state for an RNNCore. Args: batch_size: An int, or scalar int32 Tensor representing the batch size. dtype: The data type to use for the state. trainable: Boolean that indicates whether to learn the initial state. Note that initializers and regularizers will be ignored if `trainable=False`. trainable_initializers: An initializer function or nested structure of functions with same structure as the `state_size` property of the core, to be used as initializers of the initial state variable. trainable_regularizers: Optional regularizer function or nested structure of functions with the same structure as the `state_size` property of the core, to be used as regularizers of the initial state variable. As a default, no regularizers are used. A regularizer should be a function that takes a single `Tensor` as an input and returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in `tf.contrib.layers`. name: Optional string used to prefix the initial state variable names, in the case of a trainable initial state. If not provided, defaults to the name of the module. Returns: A tensor or nested tuple of tensors with same structure and shape as the `state_size` property of the core. Raises: ValueError: if the user passes initializers that are not functions. ValueError: if the user passes regularizers that are not functions.'
def initial_state(self, batch_size, dtype=tf.float32, trainable=False, trainable_initializers=None, trainable_regularizers=None, name=None, **unused_kwargs):
with tf.name_scope(self._initial_state_scope(name)): if (not trainable): return self.zero_state(batch_size, dtype) else: return trainable_initial_state(batch_size, self.state_size, dtype, initializers=trainable_initializers, regularizers=trainable_regularizers, name=self._initial_state_scope(name))
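A minimal sketch of requesting a learnable initial state from a core and unrolling it (TF1-style graph code; the LSTM size, batch size and sequence shape are illustrative):
import tensorflow as tf
import sonnet as snt

core = snt.LSTM(hidden_size=16)
# trainable=True creates variables that parameterize the initial state.
init_state = core.initial_state(batch_size=32, trainable=True)
inputs = tf.placeholder(tf.float32, [32, 10, 8])  # [batch, time, features]
outputs, final_state = tf.nn.dynamic_rnn(core, inputs,
                                         initial_state=init_state)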
'Size(s) of state(s) used by this cell. It can be represented by an Integer, a TensorShape or a tuple of Integers or TensorShapes.'
@property def state_size(self):
raise NotImplementedError('Abstract method')
'Integer or TensorShape: size of outputs produced by this cell.'
@property def output_size(self):
raise NotImplementedError('Abstract method')
'Return zero-filled state tensor(s). Args: batch_size: int, float, or unit Tensor representing the batch size. dtype: the data type to use for the state. Returns: If `state_size` is an int or TensorShape, then the return value is a `N-D` tensor of shape `[batch_size x state_size]` filled with zeros. If `state_size` is a nested list or tuple, then the return value is a nested list or tuple (of the same structure) of `2-D` tensors with the shapes `[batch_size x s]` for each s in `state_size`.'
def zero_state(self, batch_size, dtype):
with tf.name_scope((type(self).__name__ + 'ZeroState'), values=[batch_size]): return rnn_cell_impl._zero_state_tensors(self.state_size, batch_size, dtype)
'Constructs the Module that introduces a trainable state in the graph. It receives an initial state that will be used as the initial values for the trainable variables that the module contains, and optionally a mask that indicates the parts of the initial state that should be learnable. Args: initial_state: tensor or arbitrarily nested iterables of tensors. mask: optional boolean mask. It should have the same nested structure as the given initial_state. name: module name. Raises: TypeError: if mask is not a list of booleans or None.'
def __init__(self, initial_state, mask=None, name='trainable_initial_state'):
super(TrainableInitialState, self).__init__(name=name) warnings.simplefilter('always', DeprecationWarning) warnings.warn('Use the trainable flag in initial_state instead.', DeprecationWarning, stacklevel=2) if (mask is not None): flat_mask = nest.flatten(mask) if (not all([isinstance(m, bool) for m in flat_mask])): raise TypeError('Mask should be None or a list of boolean values.') nest.assert_same_structure(initial_state, mask) self._mask = mask self._initial_state = initial_state
'Connects the module to the graph. Returns: The learnable state, which has the same type, structure and shape as the `initial_state` passed to the constructor.'
def _build(self):
flat_initial_state = nest.flatten(self._initial_state) if (self._mask is not None): flat_mask = nest.flatten(self._mask) flat_learnable_state = [_single_learnable_state(state, state_id=i, learnable=mask) for (i, (state, mask)) in enumerate(zip(flat_initial_state, flat_mask))] else: flat_learnable_state = [_single_learnable_state(state, state_id=i) for (i, state) in enumerate(flat_initial_state)] return nest.pack_sequence_as(structure=self._initial_state, flat_sequence=flat_learnable_state)
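A sketch of wrapping a hand-built nested state so that only part of it is learned (note the module is flagged as deprecated in favour of the `trainable` flag on `initial_state`; shapes are illustrative):
import tensorflow as tf
import sonnet as snt

# Two-component state; learn the first component, keep the second at zero.
initial_state = (tf.zeros([32, 16]), tf.zeros([32, 16]))
mask = (True, False)
trainable_state = snt.TrainableInitialState(initial_state, mask=mask)()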
'Tests if calling __init__ without named args raises a ValueError.'
def testInitNoNamedArgs(self):
with self.assertRaises(ValueError): NoInitIdentityModule('foobar')
'Tests if calling __init__ without a string name raises a TypeError.'
def testInitInvalidTypeArgs(self):
with self.assertRaises(TypeError): NoInitIdentityModule(name=123)
'Tests if calling __init__ with no args uses correct defaults.'
def testInitNoArgs(self):
module = NoInitIdentityModule() self.assertEqual(module.module_name, 'no_init_identity_module')
'Tests if a __call__ with no __init__ raises an error.'
def testInitNoSuper(self):
module = NoSuperInitIdentityModule() with self.assertRaises(base.NotInitializedError): module(tf.constant([1]))
'Initialize AttentiveRead module. Args: attention_logit_mod: Module that produces a logit corresponding to a memory slot\'s compatibility. Must map a [batch_size * memory_size, memory_word_size + query_word_size]-shaped Tensor to a [batch_size * memory_size, 1]-shaped Tensor. name: string. Name for module.'
def __init__(self, attention_logit_mod, name='attention'):
super(AttentiveRead, self).__init__(name=name) self._attention_logit_mod = attention_logit_mod
'Perform a differentiable read. Args: memory: [batch_size, memory_size, memory_word_size]-shaped Tensor of dtype float32. This represents, for each example and memory slot, a single embedding to attend over. query: [batch_size, query_word_size]-shaped Tensor of dtype float32. Represents, for each example, a single embedding representing a query. memory_mask: None or [batch_size, memory_size]-shaped Tensor of dtype bool. An entry of False indicates that a memory slot should not enter the resulting weighted sum. If None, all memory is used. Returns: An AttentionOutput instance containing: read: [batch_size, memory_word_size]-shaped Tensor of dtype float32. This represents, for each example, a weighted sum of the contents of the memory. weights: [batch_size, memory_size]-shaped Tensor of dtype float32. This represents, for each example and memory slot, the attention weights used to compute the read. weight_logits: [batch_size, memory_size]-shaped Tensor of dtype float32. This represents, for each example and memory slot, the logits of the attention weights, that is, `weights` is calculated by taking the softmax of the weight logits. Raises: UnderspecifiedError: if memory_word_size or query_word_size can not be inferred. IncompatibleShapeError: if memory, query, memory_mask, or output of attention_logit_mod do not match expected shapes.'
def _build(self, memory, query, memory_mask=None):
if (len(memory.get_shape()) != 3): raise base.IncompatibleShapeError('memory must have shape [batch_size, memory_size, memory_word_size].') if (len(query.get_shape()) != 2): raise base.IncompatibleShapeError('query must have shape [batch_size, query_word_size].') if ((memory_mask is not None) and (len(memory_mask.get_shape()) != 2)): raise base.IncompatibleShapeError('memory_mask must have shape [batch_size, memory_size].') inferred_memory_word_size = memory.get_shape()[2].value inferred_query_word_size = query.get_shape()[1].value if ((inferred_memory_word_size is None) or (inferred_query_word_size is None)): raise base.UnderspecifiedError('memory_word_size and query_word_size must be known at graph construction time.') memory_shape = tf.shape(memory) batch_size = memory_shape[0] memory_size = memory_shape[1] query_shape = tf.shape(query) query_batch_size = query_shape[0] expanded_query = tf.tile(tf.expand_dims(query, dim=1), [1, memory_size, 1]) with tf.control_dependencies([tf.assert_equal(batch_size, query_batch_size)]): concatenated_embeddings = tf.concat(values=[memory, expanded_query], axis=2) batch_apply_attention_logit = basic.BatchApply(self._attention_logit_mod, n_dims=2, name='batch_apply_attention_logit') attention_weight_logits = batch_apply_attention_logit(concatenated_embeddings) if (len(attention_weight_logits.get_shape()) != 3): raise base.IncompatibleShapeError('attention_weight_logits must be a rank-3 Tensor. Are you sure that attention_logit_mod() returned [batch_size * memory_size, 1]-shaped Tensor?') attention_weight_logits = tf.squeeze(attention_weight_logits, [2]) if (memory_mask is not None): num_remaining_memory_slots = tf.reduce_sum(tf.cast(memory_mask, dtype=tf.int32), axis=[1]) with tf.control_dependencies([tf.assert_positive(num_remaining_memory_slots)]): finfo = np.finfo(np.float32) kept_indices = tf.cast(memory_mask, dtype=tf.float32) ignored_indices = tf.cast(tf.logical_not(memory_mask), dtype=tf.float32) lower_bound = ((finfo.max * kept_indices) + (finfo.min * ignored_indices)) attention_weight_logits = tf.minimum(attention_weight_logits, lower_bound) attention_weight = tf.reshape(tf.nn.softmax(attention_weight_logits), shape=[batch_size, memory_size, 1]) attended_memory = tf.reduce_sum((memory * attention_weight), axis=[1]) (inferred_batch_size, _, inferred_memory_word_size) = memory.get_shape().as_list() attended_memory.set_shape([inferred_batch_size, inferred_memory_word_size]) return AttentionOutput(read=attended_memory, weights=tf.squeeze(attention_weight, [2]), weight_logits=attention_weight_logits)
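A wiring sketch using a linear scorer as the attention_logit_mod (shapes and sizes are illustrative):
import tensorflow as tf
import sonnet as snt

memory = tf.placeholder(tf.float32, [4, 7, 10])  # [batch, memory_size, word]
query = tf.placeholder(tf.float32, [4, 5])       # [batch, query_word]

# snt.Linear(1) maps [batch * memory_size, 10 + 5] -> [batch * memory_size, 1].
attention = snt.AttentiveRead(snt.Linear(output_size=1))
read, weights, weight_logits = attention(memory, query)
# read: [4, 10]; weights and weight_logits: [4, 7]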
'Construct a Basic RNN core. Args: hidden_size: hidden size dimensionality. activation: activation function to use. initializers: optional dict containing ops to initialize the weights. This dictionary may contain the keys \'in_to_hidden\' and/or \'hidden_to_hidden\'. partitioners: optional dict containing ops to partition the weights. This dictionary may contain the keys \'in_to_hidden\' and/or \'hidden_to_hidden\'. regularizers: optional dict containing ops to regularize the weights. This dictionary may contain the keys \'in_to_hidden\' and/or \'hidden_to_hidden\'. name: name of the module. Raises: KeyError: if `initializers` contains any keys other than \'in_to_hidden\' or \'hidden_to_hidden\'. KeyError: if `partitioners` contains any keys other than \'in_to_hidden\' or \'hidden_to_hidden\'. KeyError: if `regularizers` contains any keys other than \'in_to_hidden\' or \'hidden_to_hidden\'. TypeError: If any of the given initializers are not callable. TypeError: If any of the given partitioners are not callable. TypeError: If any of the given regularizers are not callable.'
def __init__(self, hidden_size, activation=tf.tanh, initializers=None, partitioners=None, regularizers=None, name='vanilla_rnn'):
super(VanillaRNN, self).__init__(name=name) self._hidden_size = hidden_size self._activation = activation self._initializers = util.check_initializers(initializers, self.POSSIBLE_INITIALIZER_KEYS) self._partitioners = util.check_partitioners(partitioners, self.POSSIBLE_INITIALIZER_KEYS) self._regularizers = util.check_regularizers(regularizers, self.POSSIBLE_INITIALIZER_KEYS)
'Connects the VanillaRNN module into the graph. If this is not the first time the module has been connected to the graph, the Tensors provided as input_ and state must have the same final dimension, in order for the existing variables to be the correct size for their corresponding multiplications. The batch size may differ for each connection. Args: input_: a 2D Tensor of size [batch_size, input_size]. prev_state: a 2D Tensor of size [batch_size, hidden_size]. Returns: output: a 2D Tensor of size [batch_size, hidden_size]. next_state: a Tensor of size [batch_size, hidden_size]. Raises: ValueError: if connecting the module into the graph any time after the first time, and the inferred size of the inputs does not match previous invocations.'
def _build(self, input_, prev_state):
self._in_to_hidden_linear = basic.Linear(self._hidden_size, name='in_to_hidden', initializers=self._initializers.get('in_to_hidden'), partitioners=self._partitioners.get('in_to_hidden'), regularizers=self._regularizers.get('in_to_hidden')) self._hidden_to_hidden_linear = basic.Linear(self._hidden_size, name='hidden_to_hidden', initializers=self._initializers.get('hidden_to_hidden'), partitioners=self._partitioners.get('hidden_to_hidden'), regularizers=self._regularizers.get('hidden_to_hidden')) in_to_hidden = self._in_to_hidden_linear(input_) hidden_to_hidden = self._hidden_to_hidden_linear(prev_state) output = self._activation((in_to_hidden + hidden_to_hidden)) return (output, output)
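A usage sketch that unrolls the core over a sequence (TF1-style; sizes are illustrative). Each step computes activation(in_to_hidden(x_t) + hidden_to_hidden(h_{t-1})):
import tensorflow as tf
import sonnet as snt

rnn = snt.VanillaRNN(hidden_size=32)
inputs = tf.placeholder(tf.float32, [8, 20, 16])  # [batch, time, input_size]
init_state = rnn.initial_state(batch_size=8)
outputs, final_state = tf.nn.dynamic_rnn(rnn, inputs,
                                         initial_state=init_state)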
'Construct a Deep RNN core. Args: cores: iterable of modules or ops. skip_connections: a boolean that indicates whether to use skip connections. This means that the input is fed to all the layers, after being concatenated with the output of the previous layer. The output of the module will be the concatenation of all the outputs of the internal modules. concat_final_output_if_skip: A boolean that indicates whether the outputs of intermediate layers should be concatenated into the timestep-wise output of the core. By default this is True. If this is set to False, then the core output is that of the final layer, i.e. that of `cores[-1]`. name: name of the module. Raises: ValueError: if `cores` is not an iterable, or if `skip_connections` is True and not all the modules are recurrent.'
def __init__(self, cores, skip_connections=True, concat_final_output_if_skip=True, name='deep_rnn'):
super(DeepRNN, self).__init__(name=name) if (not isinstance(cores, collections.Iterable)): raise ValueError('Cores should be an iterable object.') self._cores = tuple(cores) self._skip_connections = skip_connections self._concat_final_output_if_skip = concat_final_output_if_skip self._is_recurrent_list = [isinstance(core, rnn_core.RNNCore) for core in self._cores] if self._skip_connections: tf.logging.warning('The `skip_connections` argument will be deprecated. Please use snt.SkipConnectionCore instead.') if (not all(self._is_recurrent_list)): raise ValueError('skip_connections are enabled but not all cores are `snt.RNNCore`s, which is not supported. The following cores were specified: {}.'.format(self._cores)) self._check_cores_output_sizes() self._num_recurrent = sum(self._is_recurrent_list)
'Checks the output_sizes of the cores of the DeepRNN module. Raises: ValueError: if the outputs of the cores cannot be concatenated along their first dimension.'
def _check_cores_output_sizes(self):
for core_sizes in zip(*tuple(_get_flat_core_sizes(self._cores))): first_core_list = core_sizes[0][1:] for (i, core_list) in enumerate(core_sizes[1:]): if (core_list[1:] != first_core_list): raise ValueError(('The outputs of the provided cores are not able to be concatenated along the first feature dimension. Core 0 has size %s, whereas Core %d has size %s' % (first_core_list, (i + 1), core_list)))
'Connects the DeepRNN module into the graph. If this is not the first time the module has been connected to the graph, the Tensors provided as input_ and state must have the same final dimension, in order for the existing variables to be the correct size for their corresponding multiplications. The batch size may differ for each connection. Args: inputs: a nested tuple of Tensors of arbitrary dimensionality, with at least an initial batch dimension. prev_state: a tuple of `prev_state`s that corresponds to the state of each one of the cores of the `DeepCore`. Returns: output: a nested tuple of Tensors of arbitrary dimensionality, with at least an initial batch dimension. next_state: a tuple of `next_state`s that corresponds to the updated state of each one of the cores of the `DeepCore`. Raises: ValueError: if connecting the module into the graph any time after the first time, and the inferred size of the inputs does not match previous invocations. This may happen if one connects a module any time after the first time that does not have the configuration of skip connections as the first time.'
def _build(self, inputs, prev_state):
current_input = inputs next_states = [] outputs = [] recurrent_idx = 0 for (i, core) in enumerate(self._cores): if (self._skip_connections and (i > 0)): flat_input = (nest.flatten(inputs), nest.flatten(current_input)) flat_input = [tf.concat(input_, 1) for input_ in zip(*flat_input)] current_input = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input) if self._is_recurrent_list[i]: (current_input, next_state) = core(current_input, prev_state[recurrent_idx]) next_states.append(next_state) recurrent_idx += 1 else: current_input = core(current_input) if self._skip_connections: outputs.append(current_input) if (self._skip_connections and self._concat_final_output_if_skip): flat_outputs = tuple((nest.flatten(output) for output in outputs)) flat_outputs = [tf.concat(output, 1) for output in zip(*flat_outputs)] output = nest.pack_sequence_as(structure=outputs[0], flat_sequence=flat_outputs) else: output = current_input return (output, tuple(next_states))
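A construction sketch stacking recurrent cores around a plain op; skip connections are disabled here because they require every core to be an RNNCore (sizes illustrative):
import tensorflow as tf
import sonnet as snt

deep_rnn = snt.DeepRNN([snt.LSTM(hidden_size=32),
                        tf.tanh,
                        snt.LSTM(hidden_size=16)],
                       skip_connections=False)
inputs = tf.placeholder(tf.float32, [4, 10, 8])
init_state = deep_rnn.initial_state(batch_size=4)
outputs, final_state = tf.nn.dynamic_rnn(deep_rnn, inputs,
                                         initial_state=init_state)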
'Builds the default start state for a DeepRNN. Args: batch_size: An int, float or scalar Tensor representing the batch size. dtype: The data type to use for the state. trainable: Boolean that indicates whether to learn the initial state. trainable_initializers: An initializer function or nested structure of functions with same structure as the `state_size` property of the core, to be used as initializers of the initial state variable. trainable_regularizers: Optional regularizer function or nested structure of functions with the same structure as the `state_size` property of the core, to be used as regularizers of the initial state variable. A regularizer should be a function that takes a single `Tensor` as an input and returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in `tf.contrib.layers`. name: Optional string used to prefix the initial state variable names, in the case of a trainable initial state. If not provided, defaults to the name of the module. Returns: A tensor or nested tuple of tensors with same structure and shape as the `state_size` property of the core. Raises: ValueError: if the number of passed initializers is not the same as the number of recurrent cores.'
def initial_state(self, batch_size, dtype=tf.float32, trainable=False, trainable_initializers=None, trainable_regularizers=None, name=None):
initial_state = [] if (trainable_initializers is None): trainable_initializers = ([None] * self._num_recurrent) if (trainable_regularizers is None): trainable_regularizers = ([None] * self._num_recurrent) num_initializers = len(trainable_initializers) if (num_initializers != self._num_recurrent): raise ValueError(('The number of initializers and recurrent cores should be the same. Received %d initializers for %d specified recurrent cores.' % (num_initializers, self._num_recurrent))) with tf.name_scope(self._initial_state_scope(name)): recurrent_idx = 0 for (is_recurrent, core) in zip(self._is_recurrent_list, self._cores): if is_recurrent: core_initial_state = core.initial_state(batch_size, dtype=dtype, trainable=trainable, trainable_initializers=trainable_initializers[recurrent_idx], trainable_regularizers=trainable_regularizers[recurrent_idx]) initial_state.append(core_initial_state) recurrent_idx += 1 return tuple(initial_state)
'Construct a ModelRNN core. Args: model: callable that computes the next state. name: name of the module. Raises: TypeError: if model is not a callable object or if it is an RNNCore. AttributeError: if model does not have an output_size attribute.'
def __init__(self, model, name='model_rnn'):
super(ModelRNN, self).__init__(name=name) if (not callable(model)): raise TypeError('Model must be callable.') if isinstance(model, rnn_core.RNNCore): raise TypeError('Model should not be an RNNCore.') try: self._output_size = model.output_size except AttributeError: raise AttributeError('Model should have an output_size attribute.') self._model = model
'Connects the ModelRNN module into the graph. If this is not the first time the module has been connected to the graph, the Tensors provided as input_ and state must have the same final dimension, in order for the existing variables to be the correct size for their corresponding multiplications. The batch size may differ for each connection. Args: inputs: Tensor input to the ModelRNN (ignored). prev_state: Tensor of size `model.output_size`. Returns: output: Tensor of size `model.output_size`. next_state: Tensor of size `model.output_size`.'
def _build(self, inputs, prev_state):
next_state = self._model(prev_state) return (next_state, next_state)
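A sketch of iterating a feed-forward model as a recurrent core; the per-step inputs are ignored and only the number of timesteps matters (sizes illustrative):
import tensorflow as tf
import sonnet as snt

model = snt.Linear(output_size=16)  # exposes an output_size attribute
core = snt.ModelRNN(model)
inputs = tf.placeholder(tf.float32, [4, 10, 1])   # contents are ignored
init_state = tf.placeholder(tf.float32, [4, 16])  # state fed to the model
outputs, final_state = tf.nn.dynamic_rnn(core, inputs,
                                         initial_state=init_state)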
'Tests ACT using an LSTM for the core.'
@parameterized.Parameters((13, 11, 7, 3, 5), (3, 3, 3, 1, 5), (1, 1, 1, 1, 1)) def testACTLSTM(self, input_size, hidden_size, output_size, seq_len, batch_size):
lstm = gated_rnn.LSTM(hidden_size) def get_hidden_state(state): (hidden, unused_cell) = state return hidden self._testACT(input_size, hidden_size, output_size, seq_len, batch_size, lstm, get_hidden_state)
'Tests ACT using a VanillaRNN for the core.'
@parameterized.Parameters((13, 11, 7, 3, 5), (3, 3, 3, 1, 5), (1, 1, 1, 1, 1)) def testACTVanilla(self, input_size, hidden_size, output_size, seq_len, batch_size):
vanilla = basic_rnn.VanillaRNN(hidden_size) def get_state(state): return state self._testACT(input_size, hidden_size, output_size, seq_len, batch_size, vanilla, get_state)
'Constructs a new `BlockTriangularMatrix` module. Args: block_shape: tuple, 2-dimensional tuple indicating the shape of each individual block. block_rows: int, the number of blocks in each row (and column) of the output matrix. include_diagonal: boolean, indicates whether or not blocks on the diagonal should be included. include_off_diagonal: boolean, indicates whether or not the off-diagonal blocks should be included. If set to False, the value of `upper` is ignored. upper: boolean, if True then the output matrix is block upper triangular; if False, it is block lower triangular. name: string, name of the module. Raises: ValueError: if `include_diagonal` and `include_off_diagonal` are both False.'
def __init__(self, block_shape, block_rows, include_diagonal=True, include_off_diagonal=True, upper=False, name='block_triangular_matrix'):
super(BlockTriangularMatrix, self).__init__(name=name) if ((not include_diagonal) and (not include_off_diagonal)): raise ValueError('Arguments include_diagonal and include_off_diagonal cannot both be False.') self._block_shape = tuple(block_shape) self._block_rows = block_rows self._include_diagonal = include_diagonal self._include_off_diagonal = include_off_diagonal self._upper = upper self._num_blocks = sum((self._content_blocks(r) for r in xrange(self._block_rows)))
'The total number of blocks in the output matrix.'
@property def num_blocks(self):
return self._num_blocks
'The number of entries of each block.'
@property def block_size(self):
return (self._block_shape[0] * self._block_shape[1])
'The shape of each block.'
@property def block_shape(self):
return self._block_shape
'The shape of the output matrix.'
@property def output_shape(self):
return ((self._block_shape[0] * self._block_rows), (self._block_shape[1] * self._block_rows))
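A worked example of the bookkeeping above, assuming snt.BlockTriangularMatrix is the exported name (values illustrative):
import sonnet as snt

# Block lower-triangular matrix with 2x3 blocks and 3 block rows, diagonal
# and off-diagonal blocks included: 1 + 2 + 3 = 6 blocks of 2*3 = 6 entries.
mod = snt.BlockTriangularMatrix(block_shape=(2, 3), block_rows=3)
assert mod.num_blocks == 6
assert mod.block_size == 6
assert mod.output_shape == (6, 9)  # (2 * 3, 3 * 3)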