Dataset columns (string lengths): desc 3–26.7k, decl 11–7.89k, bodies 8–553k. Each record below is a desc / decl / bodies triple.
'Check that the correct number of variables is created when sharing.'
def testSharing(self):
  inputs1 = tf.placeholder(tf.float32, shape=[None, 64])
  inputs2 = tf.placeholder(tf.float32, shape=[None, 64])
  ln = snt.LayerNorm()
  ln(inputs1)
  ln(inputs2)
  self.assertEqual(len(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)), 2)
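LayerNorm creates two variables (in Sonnet, the scale gamma and the offset beta); connecting the same module instance a second time reuses them, which is why the global-variable count stays at two.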
'Constructs a Sequential module.

This feeds the output of each layer into the next and returns the output of
the final layer. If a layer returns a tuple, it is assumed that this must be
unpacked into the argument list of the next layer. If it is not a tuple, it
is simply passed through to the next layer unchanged.

Args:
  layers: Iterable of callables to stack together, which can be modules
    or ops.
  name: Name of the module.

Raises:
  TypeError: If `layers` is None or contains any non-callable items.'
def __init__(self, layers, name='sequential'):
  super(Sequential, self).__init__(name=name)
  self._layers = tuple(layers)
  is_not_callable = [(i, mod) for i, mod in enumerate(self._layers)
                     if not callable(mod)]
  if is_not_callable:
    raise TypeError('Items {} not callable with types: {}'.format(
        ', '.join(str(i) for i, _ in is_not_callable),
        ', '.join(type(layer).__name__ for _, layer in is_not_callable)))
'Connects the Sequential module into the graph.

Args:
  *args: A tuple of inputs, to be unpacked as the arguments to the first
    layer.

Returns:
  The output value of the last layer.'
def _build(self, *args):
  net = args
  for layer in self._layers:
    if isinstance(net, tuple):
      net = layer(*net)
    else:
      net = layer(net)
  return net
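A minimal usage sketch of Sequential (the snt.Linear sizes and placeholder shape here are illustrative assumptions, not taken from this file):

import sonnet as snt
import tensorflow as tf

# Illustrative two-layer MLP: modules and raw ops can be mixed freely,
# since Sequential only requires each item to be callable.
mlp = snt.Sequential([
    snt.Linear(output_size=64),
    tf.nn.relu,
    snt.Linear(output_size=10),
])
logits = mlp(tf.placeholder(tf.float32, shape=[None, 32]))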
'Tests the _fill_shape private function in snt.conv.'
def test(self):
  x = random.randint(1, 10)
  self.assertEqual(conv._fill_shape(x, 1), (x,))
  self.assertEqual(conv._fill_shape(x, 2), (x, x))
  self.assertEqual(conv._fill_shape(x, 3), (x, x, x))
  self.assertEqual(conv._fill_shape(x, 4), (x, x, x, x))
  self.assertEqual(conv._fill_shape([x, x + 1, x + 2], 3),
                   (x, x + 1, x + 2))
  err = 'n must be a positive integer'
  with self.assertRaisesRegexp(TypeError, err):
    conv._fill_shape(x, 0)
  err = ('must be either a positive integer or an iterable of positive '
         'integers of size 4')
  with self.assertRaisesRegexp(TypeError, err):
    conv._fill_shape([], 4)
  with self.assertRaisesRegexp(TypeError, err):
    conv._fill_shape([x], 4)
  with self.assertRaisesRegexp(TypeError, err):
    conv._fill_shape([x, x], 4)
  with self.assertRaisesRegexp(TypeError, err):
    conv._fill_shape(['b'], 4)
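The assertions above pin down the contract of conv._fill_shape. A sketch consistent with them (hypothetical; not Sonnet's actual implementation):

def _fill_shape(x, n):
  # Broadcast a positive int to an n-tuple, or validate an iterable of
  # exactly n positive ints; anything else raises TypeError.
  if not isinstance(n, int) or n < 1:
    raise TypeError('n must be a positive integer')
  if isinstance(x, int) and x > 0:
    return (x,) * n
  try:
    ok = len(x) == n and all(isinstance(v, int) and v > 0 for v in x)
  except TypeError:
    ok = False
  if not ok:
    raise TypeError('x is {}, must be either a positive integer or an '
                    'iterable of positive integers of size {}'.format(x, n))
  return tuple(x)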
'Test output shapes are correct.'
@parameterized.Parameters(
    *zip(input_shape, stride, kernel_shape, padding, output_shape))
def testFunction(self, input_shape, stride, kernel_shape, padding,
                 output_shape):
  self.assertEqual(
      conv._default_transpose_size(input_shape, stride,
                                   kernel_shape=kernel_shape,
                                   padding=padding),
      tuple(output_shape))
'Test ConvTranspose modules return expected default output shapes.'
@parameterized.Parameters(
    *zip(input_shape, stride, kernel_shape, padding, output_shape))
def testModules(self, input_shape, stride, kernel_shape, padding,
                output_shape):
  if len(input_shape) == 1:
    module = snt.Conv1DTranspose
  elif len(input_shape) == 2:
    module = snt.Conv2DTranspose
  elif len(input_shape) == 3:
    module = snt.Conv3DTranspose
  batch_size = [1]
  channels = [1]
  inputs = tf.zeros(shape=batch_size + input_shape + channels,
                    dtype=tf.float32)
  outputs = module(output_channels=1, kernel_shape=kernel_shape,
                   stride=stride, padding=padding)(inputs)
  self.assertEqual(output_shape, outputs.get_shape().as_list()[1:-1])
'Test ConvTranspose modules with multiple connections.'
@parameterized.Parameters(
    *zip(input_shape, stride, kernel_shape, padding, output_shape))
def testConnectTwice(self, input_shape, stride, kernel_shape, padding,
                     output_shape):
  if len(input_shape) == 1:
    module = snt.Conv1DTranspose
  elif len(input_shape) == 2:
    module = snt.Conv2DTranspose
  elif len(input_shape) == 3:
    module = snt.Conv3DTranspose
  batch_size = [1]
  channels = [1]
  inputs = tf.zeros(shape=batch_size + input_shape + channels,
                    dtype=tf.float32)
  inputs_2 = tf.zeros(shape=batch_size + input_shape + channels,
                      dtype=tf.float32)
  conv1 = module(output_channels=1, kernel_shape=kernel_shape,
                 stride=stride, padding=padding)
  outputs = conv1(inputs)
  outputs_2 = conv1(inputs_2)
  new_input_shape = [25] * len(input_shape)
  new_inputs = tf.zeros(shape=batch_size + new_input_shape + channels,
                        dtype=tf.float32)
  new_outputs = conv1(new_inputs)
  with self.test_session() as sess:
    tf.global_variables_initializer().run()
    outputs_array, outputs_array_2 = sess.run([outputs, outputs_2])
    self.assertEqual(outputs_array.shape, outputs_array_2.shape)
    sess.run(new_outputs)
'The correct number of variables is created.'
@parameterized.Parameters(*itertools.product(modules, (True, False)))
def testVariables(self, module_info, use_bias):
  module, num_input_dims, module_kwargs = module_info
  mod_name = 'module'
  input_shape = (10,) * (num_input_dims + 2)
  inputs = tf.placeholder(tf.float32, input_shape)
  with tf.variable_scope('scope'):
    conv_mod = module(name=mod_name, use_bias=use_bias, **module_kwargs)
  self.assertEqual(conv_mod.scope_name, 'scope/' + mod_name)
  self.assertEqual(conv_mod.module_name, mod_name)
  with self.assertRaisesRegexp(snt.NotConnectedError, 'not instantiated yet'):
    conv_mod.get_variables()
  output = conv_mod(inputs)
  supposed_variables = conv_mod.get_possible_initializer_keys(
      use_bias=use_bias)
  self.assertIn(len(supposed_variables), [1, 2, 3])
  graph_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
  self.assertEqual(len(graph_variables), len(supposed_variables))
  conv_variables = conv_mod.get_variables()
  self.assertEqual(len(conv_variables), len(supposed_variables))
  variable_names = {v.name for v in conv_variables}
  for var_name in supposed_variables:
    self.assertIn('scope/{}/{}:0'.format(mod_name, var_name), variable_names)
  with self.test_session() as sess:
    tf.global_variables_initializer().run()
    inputs_data = np.random.rand(*input_shape)
    sess.run(output, feed_dict={inputs: inputs_data})
'Error is thrown if the input is missing a channel dimension.'
@parameterized.Parameters(*itertools.product(modules, (True, False)))
def testMissingChannelsError(self, module_info, use_bias):
  module, num_input_dims, module_kwargs = module_info
  conv_mod = module(use_bias=use_bias, **module_kwargs)
  inputs = tf.placeholder(tf.float32, (10,) * (num_input_dims + 1))
  err = 'Input Tensor must have shape'
  with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):
    conv_mod(inputs)
'Error is thrown if the input has been incorrectly flattened.'
@parameterized.Parameters(*itertools.product(modules, (True, False)))
def testFlattenedError(self, module_info, use_bias):
  module, num_input_dims, module_kwargs = module_info
  conv_mod = module(use_bias=use_bias, **module_kwargs)
  inputs = tf.placeholder(tf.float32, (10,) * (num_input_dims + 1))
  inputs = snt.BatchFlatten()(inputs)
  err = 'Input Tensor must have shape'
  with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):
    conv_mod(inputs)
'Check that custom_getter option works.'
@parameterized.Parameters(*modules)
def testCustomGetter(self, module, num_input_dims, module_kwargs):

  def stop_gradient(getter, *args, **kwargs):
    return tf.stop_gradient(getter(*args, **kwargs))

  inputs = tf.placeholder(tf.float32, (10,) * (num_input_dims + 2))
  conv_mod1 = module(**module_kwargs)
  out1 = conv_mod1(inputs)
  conv_mod2 = module(custom_getter=stop_gradient, **module_kwargs)
  out2 = conv_mod2(inputs)
  num_variables = len(conv_mod1.get_variables())
  grads1 = tf.gradients(out1, list(conv_mod1.get_variables()))
  grads2 = tf.gradients(out2, list(conv_mod2.get_variables()))
  self.assertEqual([tf.Tensor] * num_variables, [type(g) for g in grads1])
  self.assertEqual([None] * num_variables, grads2)
  if hasattr(conv_mod2, 'transpose'):
    conv_mod2_transpose = conv_mod2.transpose()
    inputs_transpose = tf.placeholder(tf.float32, out2.get_shape())
    out3 = conv_mod2_transpose(inputs_transpose)
    grads3 = tf.gradients(out3, list(conv_mod2_transpose.get_variables()))
    self.assertEqual([None] * num_variables, grads3)
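The custom getter wraps every variable the module retrieves through tf.get_variable in tf.stop_gradient, disconnecting it from the gradient graph; tf.gradients therefore returns None for each of the second module's variables. The transposed module inherits the getter, hence the same None gradients.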
'The generated shapes are correct with SAME padding.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testShapesSame(self, use_bias):
  batch_size = random.randint(1, 100)
  in_height = random.randint(10, 288)
  in_width = random.randint(10, 288)
  in_channels = random.randint(1, 10)
  out_channels = random.randint(1, 32)
  kernel_shape_h = random.randint(1, 11)
  kernel_shape_w = random.randint(1, 11)
  inputs = tf.placeholder(
      tf.float32, shape=[batch_size, in_height, in_width, in_channels])
  conv1 = snt.Conv2D(
      name='conv1', output_channels=out_channels,
      kernel_shape=[kernel_shape_h, kernel_shape_w], padding=snt.SAME,
      stride=1, use_bias=use_bias)
  output = conv1(inputs)
  self.assertTrue(output.get_shape().is_compatible_with(
      [batch_size, in_height, in_width, out_channels]))
  self.assertTrue(conv1.w.get_shape().is_compatible_with(
      [kernel_shape_h, kernel_shape_w, in_channels, out_channels]))
  if use_bias:
    self.assertTrue(conv1.b.get_shape().is_compatible_with([out_channels]))
'The generated shapes are correct when input shape not known.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testShapesNotKnown(self, use_bias):
  batch_size = 5
  in_height = in_width = 32
  in_channels = out_channels = 5
  kernel_shape_h = kernel_shape_w = 3
  inputs = tf.placeholder(
      tf.float32, shape=[None, None, None, in_channels], name='inputs')
  conv1 = snt.Conv2D(
      name='conv1', output_channels=out_channels,
      kernel_shape=[kernel_shape_h, kernel_shape_w], padding=snt.SAME,
      stride=1, use_bias=use_bias)
  output = conv1(inputs)
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    output_eval = output.eval(
        {inputs: np.zeros([batch_size, in_height, in_width, in_channels])})
    self.assertEqual(output_eval.shape,
                     (batch_size, in_height, in_width, out_channels))
'No error is thrown if image shape isn\'t known for atrous convolution.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testShapesNotKnownAtrous(self, use_bias):
  inputs = tf.placeholder(
      tf.float32, shape=[None, None, None, 5], name='inputs')
  conv1 = snt.Conv2D(
      name='conv1', output_channels=5, kernel_shape=[3, 3],
      padding=snt.SAME, stride=1, rate=2, use_bias=use_bias)
  conv1(inputs)
'Errors are thrown for invalid kernel shapes.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testKernelShape(self, use_bias):
  snt.Conv2D(output_channels=10, kernel_shape=[3, 4], name='conv1',
             use_bias=use_bias)
  snt.Conv2D(output_channels=10, kernel_shape=3, name='conv1',
             use_bias=use_bias)
  err = 'Invalid kernel shape'
  with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):
    snt.Conv2D(output_channels=10, kernel_shape=[3, 3, 3], name='conv1')
'Errors are thrown for invalid strides.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testStrideError(self, use_bias):
  snt.Conv2D(output_channels=10, kernel_shape=3, stride=1, name='conv1',
             use_bias=use_bias)
  snt.Conv2D(output_channels=10, kernel_shape=3, stride=[1, 1],
             name='conv1', use_bias=use_bias)
  snt.Conv2D(output_channels=10, kernel_shape=3, stride=[1, 1, 1, 1],
             name='conv1', use_bias=use_bias)
  with self.assertRaisesRegexp(snt.IncompatibleShapeError, 'Invalid stride'):
    snt.Conv2D(output_channels=10, kernel_shape=3, stride=[1, 1, 1],
               name='conv1')
'Errors are thrown for invalid dilation rates.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testRateError(self, use_bias):
  snt.Conv2D(output_channels=10, kernel_shape=3, rate=1, name='conv1',
             use_bias=use_bias)
  snt.Conv2D(output_channels=10, kernel_shape=3, rate=2, name='conv1',
             use_bias=use_bias)
  for rate in [0, 0.5, -1]:
    with self.assertRaisesRegexp(snt.IncompatibleShapeError,
                                 'Invalid rate shape*'):
      snt.Conv2D(output_channels=10, kernel_shape=3, rate=rate, name='conv1')
'Errors are thrown for stride > 1 when using atrous convolution.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testRateAndStrideError(self, use_bias):
  err = 'Cannot have stride > 1 with rate > 1'
  with self.assertRaisesRegexp(snt.NotSupportedError, err):
    snt.Conv2D(output_channels=10, kernel_shape=3, stride=2, rate=2,
               name='conv1', use_bias=use_bias)
  with self.assertRaisesRegexp(snt.NotSupportedError, err):
    snt.Conv2D(output_channels=10, kernel_shape=3, stride=[2, 1], rate=2,
               name='conv1', use_bias=use_bias)
'Errors are thrown for invalid input types.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testInputTypeError(self, use_bias):
  conv1 = snt.Conv2D(
      output_channels=1, kernel_shape=3, stride=1, padding=snt.SAME,
      name='conv1', use_bias=use_bias,
      initializers=create_constant_initializers(1.0, 1.0, use_bias))
  for dtype in (tf.float16, tf.float64):
    x = tf.constant(np.ones([1, 5, 5, 1]), dtype=dtype)
    err = 'Input must have dtype tf.float32.*'
    with self.assertRaisesRegexp(TypeError, err):
      conv1(x)
'Test initializers work as expected.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testInitializers(self, use_bias):
  w = random.random()
  b = random.random()
  conv1 = snt.Conv2D(
      output_channels=1, kernel_shape=3, stride=1, name='conv1',
      use_bias=use_bias,
      initializers=create_constant_initializers(w, b, use_bias))
  conv1(tf.placeholder(tf.float32, [1, 10, 10, 2]))
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    self.assertAllClose(conv1.w.eval(),
                        np.full([3, 3, 2, 1], w, dtype=np.float32))
    if use_bias:
      self.assertAllClose(conv1.b.eval(), [b])
  err = "Initializer for 'w' is not a callable function or dictionary"
  with self.assertRaisesRegexp(TypeError, err):
    snt.Conv2D(output_channels=10, kernel_shape=3, stride=1, name='conv1',
               initializers={'w': tf.ones([])})
'Test that initializers are not mutated.'
def testInitializerMutation(self):
  initializers = {'b': tf.constant_initializer(0)}
  initializers_copy = dict(initializers)
  conv1 = snt.Conv2D(output_channels=1, kernel_shape=3, stride=1,
                     name='conv1', initializers=initializers)
  conv1(tf.placeholder(tf.float32, [1, 10, 10, 2]))
  self.assertAllEqual(initializers, initializers_copy)
'Run through for something with a known answer using SAME padding.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testComputationSame(self, use_bias):
  conv1 = snt.Conv2D(
      output_channels=1, kernel_shape=3, stride=1, padding=snt.SAME,
      name='conv1', use_bias=use_bias,
      initializers=create_constant_initializers(1.0, 1.0, use_bias))
  out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32)))
  expected_out = np.array([[5, 7, 7, 7, 5],
                           [7, 10, 10, 10, 7],
                           [7, 10, 10, 10, 7],
                           [7, 10, 10, 10, 7],
                           [5, 7, 7, 7, 5]])
  if not use_bias:
    expected_out -= 1
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    self.assertAllClose(np.reshape(out.eval(), [5, 5]), expected_out)
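The expected values are tap counts: a 3x3 all-ones kernel over an all-ones input sees 4 cells at a corner, 6 along an edge and 9 in the interior, and the bias of 1 lifts these to 5, 7 and 10.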
'Run through for something with a known answer using snt.VALID padding.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testComputationValid(self, use_bias):
  conv1 = snt.Conv2D(
      output_channels=1, kernel_shape=3, stride=1, padding=snt.VALID,
      name='conv1', use_bias=use_bias,
      initializers=create_constant_initializers(1.0, 1.0, use_bias))
  out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32)))
  expected_output = np.array([[10, 10, 10],
                              [10, 10, 10],
                              [10, 10, 10]])
  if not use_bias:
    expected_output -= 1
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_output)
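With VALID padding only complete 3x3 windows are computed, so every output equals 9 + 1 (bias) = 10 and the spatial size shrinks from 5x5 to 3x3.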
'Sharing is working.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testSharing(self, use_bias):
  conv1 = snt.Conv2D(output_channels=1, kernel_shape=3, stride=1,
                     padding=snt.SAME, use_bias=use_bias, name='conv1')
  x = np.random.randn(1, 5, 5, 1)
  x1 = tf.constant(x, dtype=np.float32)
  x2 = tf.constant(x, dtype=np.float32)
  out1 = conv1(x1)
  out2 = conv1(x2)
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    self.assertAllClose(out1.eval(), out2.eval())
    w = np.random.randn(3, 3, 1, 1)
    conv1.w.assign(w).eval()
    self.assertAllClose(out1.eval(), out2.eval())
'The atrous conv is constructed and applied correctly with snt.VALID.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testAtrousConvValid(self, use_bias):
  conv1 = snt.Conv2D(
      output_channels=1, kernel_shape=3, stride=1, rate=2,
      padding=snt.VALID, name='conv1', use_bias=use_bias,
      initializers=create_constant_initializers(1.0, 0.0, use_bias))
  out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32)))
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    self.assertAllClose(np.reshape(out.eval(), [1, 1]), [[9]])
'The atrous conv 2D is constructed and applied correctly with SAME.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testAtrousConvSame(self, use_bias):
  conv1 = snt.Conv2D(
      output_channels=1, kernel_shape=3, stride=1, rate=2,
      padding=snt.SAME, name='conv1', use_bias=use_bias,
      initializers=create_constant_initializers(1.0, 1.0, use_bias))
  out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32)))
  expected_out = np.array([[5, 5, 7, 5, 5],
                           [5, 5, 7, 5, 5],
                           [7, 7, 10, 7, 7],
                           [5, 5, 7, 5, 5],
                           [5, 5, 7, 5, 5]])
  if not use_bias:
    expected_out -= 1
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    self.assertAllClose(np.reshape(out.eval(), [5, 5]), expected_out)
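With rate=2 the 3x3 kernel samples offsets {-2, 0, 2} per axis, so on a 5x5 input each axis contributes 3 valid taps at its centre position and 2 elsewhere: 4 taps (+1 bias = 5) off the centre row and column, 6 (+1 = 7) on exactly one of them, and 9 (+1 = 10) at the centre.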
'Tests if the correct output shapes are set up in the transposed module.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testTransposition(self, use_bias):
  net = snt.Conv2D(name='conv2d', output_channels=4, kernel_shape=3,
                   stride=1, use_bias=use_bias)
  net_transpose = net.transpose()
  input_to_net = tf.placeholder(tf.float32, shape=[None, 100, 100, 3])
  err = 'Variables in {} not instantiated yet, __call__ the module first.'
  with self.assertRaisesRegexp(snt.NotConnectedError,
                               err.format(net.scope_name)):
    net_transpose(input_to_net)
  net_transpose = net.transpose(name='another_net_transpose')
  net_out = net(input_to_net)
  net_transposed_output = net_transpose(net_out)
  self.assertAllEqual(net_transposed_output.get_shape().as_list(),
                      input_to_net.get_shape().as_list())
'2D Masks are applied properly.'
def testMask2D(self):
  mask = np.array([[1, 1, 1],
                   [1, 0, 0],
                   [0, 0, 0]], dtype=np.float32)
  inputs = tf.constant(1.0, shape=(1, 5, 5, 2))
  conv1 = snt.Conv2D(
      output_channels=1, kernel_shape=3, mask=mask, padding=snt.VALID,
      use_bias=False,
      initializers=create_constant_initializers(1.0, 0.0, use_bias=False))
  out = conv1(inputs)
  expected_out = np.array([[8] * 3] * 3)
  with self.test_session():
    tf.variables_initializer([conv1.w]).run()
    self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_out)
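The mask keeps 4 of the 9 kernel taps, and the all-ones kernel sums over both input channels, so every valid output is 4 x 2 = 8.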
'4D Masks are applied properly.'
def testMask4D(self):
  mask = np.ones([3, 3, 2, 1], dtype=np.float32)
  mask[0, 0, 0, :] = 0
  inputs = tf.constant(1.0, shape=(1, 5, 5, 2))
  conv1 = snt.Conv2D(
      output_channels=1, kernel_shape=3, mask=mask, padding=snt.VALID,
      use_bias=False,
      initializers=create_constant_initializers(1.0, 0.0, use_bias=False))
  out = conv1(inputs)
  expected_out = np.array([[17] * 3] * 3)
  with self.test_session():
    tf.variables_initializer([conv1.w]).run()
    self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_out)
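A full 3x3 kernel over 2 input channels has 18 taps; the 4D mask zeroes exactly one (h=0, w=0, first input channel), leaving 17 at every valid position.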
'Errors are thrown for invalid mask rank.'
def testMaskErrorInvalidRank(self):
  mask = np.ones((3,))
  with self.assertRaises(snt.Error) as cm:
    snt.Conv2D(output_channels=4, kernel_shape=3, mask=mask)
  self.assertEqual(str(cm.exception),
                   'Invalid mask rank: {}'.format(mask.ndim))
'Errors are thrown for invalid mask type.'
def testMaskErrorInvalidType(self):
  mask = tf.constant(1.0, shape=(3, 3))
  with self.assertRaises(TypeError) as cm:
    snt.Conv2D(output_channels=4, kernel_shape=3, mask=mask)
  self.assertEqual(str(cm.exception),
                   'Invalid type for mask: {}'.format(type(mask)))
'Errors are thrown for incompatible rank 2 mask.'
def testMaskErrorIncompatibleRank2(self):
  mask = np.ones((3, 3))
  x = tf.constant(0.0, shape=(2, 8, 8, 6))
  with self.assertRaises(snt.Error) as cm:
    snt.Conv2D(output_channels=4, kernel_shape=5, mask=mask)(x)
  self.assertTrue(str(cm.exception).startswith(
      'Invalid mask shape: {}'.format(mask.shape)))
'Errors are thrown for incompatible rank 4 mask.'
def testMaskErrorIncompatibleRank4(self):
  mask = np.ones((3, 3, 4, 5))
  x = tf.constant(0.0, shape=(2, 8, 8, 6))
  with self.assertRaises(snt.Error) as cm:
    snt.Conv2D(output_channels=4, kernel_shape=5, mask=mask)(x)
  self.assertTrue(str(cm.exception).startswith(
      'Invalid mask shape: {}'.format(mask.shape)))
'Set up some variables to re-use in multiple tests.'
def setUp(self):
  super(Conv2DTransposeTest, self).setUp()
  self.batch_size = 100
  self.in_height = 32
  self.in_width = 32
  self.in_channels = 3
  self.out_channels = 10
  self.kernel_shape_h = 5
  self.kernel_shape_w = 5
  self.strides = (1, 1, 1, 1)
  self.padding = snt.SAME
  self.in_shape = (self.batch_size, self.in_height, self.in_width,
                   self.in_channels)
  self.out_shape = (self.in_height, self.in_width)
  self.kernel_shape = (self.kernel_shape_h, self.kernel_shape_w)
  self.kernel_shape2 = (self.kernel_shape_h, self.kernel_shape_w,
                        self.out_channels, self.in_channels)
'Tests that an error is raised if the kernel shape is not specified.'
def testKernelsNotSpecified(self):
  with self.assertRaisesRegexp(ValueError, '`kernel_shape` cannot be None.'):
    snt.Conv2DTranspose(output_channels=1)
'Tests if output shapes are valid.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testOutputShapeConsistency(self, use_bias):
  inputs = tf.placeholder(tf.float32, shape=self.in_shape)
  conv1 = snt.Conv2DTranspose(
      name='conv2d_1', output_channels=self.out_channels,
      output_shape=self.out_shape, kernel_shape=self.kernel_shape,
      padding=self.padding, stride=1, use_bias=use_bias)
  outputs = conv1(inputs)
  self.assertTrue(outputs.get_shape().is_compatible_with(
      (self.batch_size,) + self.out_shape + (self.out_channels,)))
  self.assertTrue(conv1.w.get_shape().is_compatible_with(self.kernel_shape2))
  if use_bias:
    self.assertTrue(conv1.b.get_shape().is_compatible_with(
        [self.out_channels]))
'Tests if output shapes are valid when specified as an integer.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testOutputShapeInteger(self, use_bias):
  inputs = tf.zeros(shape=[3, 5, 5, 2], dtype=tf.float32)
  inputs_2 = tf.zeros(shape=[3, 5, 7, 2], dtype=tf.float32)
  conv1 = snt.Conv2DTranspose(
      name='conv2d_1', output_channels=10, output_shape=10, kernel_shape=5,
      padding=snt.SAME, stride=2, use_bias=use_bias)
  outputs = conv1(inputs)
  outputs_2 = conv1(inputs_2)
  self.assertTrue(outputs.get_shape().is_compatible_with((3, 10, 10, 10)))
  with self.test_session() as sess:
    tf.global_variables_initializer().run()
    sess.run(outputs)
    with self.assertRaises(tf.errors.InvalidArgumentError):
      sess.run(outputs_2)
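The integer output_shape is broadcast to (10, 10), which matches the first input (5x5 at stride 2) but not the second: a width of 7 at stride 2 implies an output width of 14, so the mismatch only surfaces as an InvalidArgumentError at run time, when concrete shapes are checked.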
'Tests if the correct output shapes are set up in the transposed module.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testTransposition(self, use_bias):
  net = snt.Conv2DTranspose(
      name='conv2d', output_channels=self.out_channels,
      output_shape=self.out_shape, kernel_shape=self.kernel_shape,
      padding=self.padding, stride=1, use_bias=use_bias)
  net_transpose = net.transpose()
  input_to_net = tf.placeholder(tf.float32, shape=self.in_shape)
  err = 'Variables in {} not instantiated yet, __call__ the module first.'
  with self.assertRaisesRegexp(snt.NotConnectedError,
                               err.format(net.scope_name)):
    net_transpose(input_to_net)
  net_transpose = net.transpose(name='another_net_transpose')
  net_out = net(input_to_net)
  net_transposed_output = net_transpose(net_out)
  self.assertEqual(net_transposed_output.get_shape(),
                   input_to_net.get_shape())
'Test that initializers are not mutated.'
def testInitializerMutation(self):
  initializers = {'b': tf.constant_initializer(0)}
  initializers_copy = dict(initializers)
  conv1 = snt.Conv2DTranspose(
      output_shape=(10, 10), output_channels=1, kernel_shape=3, stride=1,
      name='conv2d', initializers=initializers)
  conv1(tf.placeholder(tf.float32, [1, 10, 10, 2]))
  self.assertAllEqual(initializers, initializers_copy)
'The generated shapes are correct with SAME and VALID padding.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testShapes(self, use_bias):
  batch_size = random.randint(1, 100)
  in_length = random.randint(10, 288)
  in_channels = random.randint(1, 10)
  out_channels = random.randint(1, 32)
  kernel_shape = random.randint(1, 10)
  inputs = tf.placeholder(tf.float32,
                          shape=[batch_size, in_length, in_channels])
  conv1 = snt.Conv1D(
      output_channels=out_channels, kernel_shape=kernel_shape,
      padding=snt.SAME, stride=1, name='conv1', use_bias=use_bias)
  output1 = conv1(inputs)
  self.assertTrue(output1.get_shape().is_compatible_with(
      [batch_size, in_length, out_channels]))
  self.assertTrue(conv1.w.get_shape().is_compatible_with(
      [kernel_shape, in_channels, out_channels]))
  if use_bias:
    self.assertTrue(conv1.b.get_shape().is_compatible_with([out_channels]))
  conv2 = snt.Conv1D(
      output_channels=out_channels, kernel_shape=kernel_shape,
      padding=snt.VALID, stride=1, name='conv2', use_bias=use_bias)
  output2 = conv2(inputs)
  self.assertTrue(output2.get_shape().is_compatible_with(
      [batch_size, in_length - kernel_shape + 1, out_channels]))
  self.assertTrue(conv2.w.get_shape().is_compatible_with(
      [kernel_shape, in_channels, out_channels]))
  if use_bias:
    self.assertTrue(conv2.b.get_shape().is_compatible_with([out_channels]))
'The generated shapes are correct when input shape not known.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testShapesNotKnown(self, use_bias):
  batch_size = 5
  in_length = 32
  in_channels = out_channels = 5
  kernel_shape = 3
  inputs = tf.placeholder(tf.float32, shape=[None, None, in_channels],
                          name='inputs')
  conv1 = snt.Conv1D(
      name='conv1', output_channels=out_channels, kernel_shape=kernel_shape,
      padding=snt.SAME, stride=1, use_bias=use_bias)
  output = conv1(inputs)
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    output_eval = output.eval(
        {inputs: np.zeros([batch_size, in_length, in_channels])})
    self.assertEqual(output_eval.shape,
                     (batch_size, in_length, out_channels))
'Errors are thrown for invalid kernel shapes.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testKernelShape(self, use_bias):
  snt.Conv1D(output_channels=10, kernel_shape=[3], name='conv1',
             use_bias=use_bias)
  snt.Conv1D(output_channels=10, kernel_shape=3, name='conv1',
             use_bias=use_bias)
  err = 'Invalid kernel shape'
  with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):
    snt.Conv1D(output_channels=10, kernel_shape=[3, 3], name='conv1')
'Errors are thrown for invalid strides.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testStrideError(self, use_bias):
  snt.Conv1D(output_channels=10, kernel_shape=3, stride=1, name='conv1',
             use_bias=use_bias)
  err = 'Invalid stride'
  with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):
    snt.Conv1D(output_channels=10, kernel_shape=3, stride=[1, 1],
               name='conv1')
  with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):
    snt.Conv1D(output_channels=10, kernel_shape=3, stride=[1, 1, 1, 1],
               name='conv1')
'Errors are thrown for invalid dilation rates.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testRateError(self, use_bias):
  snt.Conv1D(output_channels=10, kernel_shape=3, rate=1, name='conv1',
             use_bias=use_bias)
  snt.Conv1D(output_channels=10, kernel_shape=3, rate=2, name='conv1',
             use_bias=use_bias)
  for rate in [0, 0.5, -1]:
    with self.assertRaisesRegexp(snt.IncompatibleShapeError,
                                 'Invalid rate shape*'):
      snt.Conv1D(output_channels=10, kernel_shape=3, rate=rate, name='conv1')
'Errors are thrown for stride > 1 when using atrous convolution.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testRateAndStrideError(self, use_bias):
  err = 'Cannot have stride > 1 with rate > 1'
  with self.assertRaisesRegexp(snt.NotSupportedError, err):
    snt.Conv1D(output_channels=10, kernel_shape=3, stride=2, rate=2,
               name='conv1', use_bias=use_bias)
'Errors are thrown for invalid input types.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testInputTypeError(self, use_bias):
  conv1 = snt.Conv1D(
      output_channels=1, kernel_shape=3, stride=1, padding=snt.VALID,
      use_bias=use_bias, name='conv1',
      initializers=create_constant_initializers(1.0, 1.0, use_bias))
  for dtype in (tf.float16, tf.float64):
    x = tf.constant(np.ones([1, 5, 1]), dtype=dtype)
    err = 'Input must have dtype tf.float32.*'
    with self.assertRaisesRegexp(TypeError, err):
      conv1(x)
'Test initializers work as expected.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testInitializers(self, use_bias):
  w = random.random()
  b = random.random()
  conv1 = snt.Conv1D(
      output_channels=1, kernel_shape=3, stride=1, padding=snt.SAME,
      use_bias=use_bias, name='conv1',
      initializers=create_constant_initializers(w, b, use_bias))
  conv1(tf.placeholder(tf.float32, [1, 10, 2]))
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    self.assertAllClose(conv1.w.eval(),
                        np.full([3, 2, 1], w, dtype=np.float32))
    if use_bias:
      self.assertAllClose(conv1.b.eval(), [b])
  err = "Initializer for 'w' is not a callable function or dictionary"
  with self.assertRaisesRegexp(TypeError, err):
    snt.Conv1D(output_channels=10, kernel_shape=3, stride=1,
               padding=snt.SAME, use_bias=use_bias, name='conv1',
               initializers={'w': tf.ones([])})
'Test that initializers are not mutated.'
def testInitializerMutation(self):
  initializers = {'b': tf.constant_initializer(0)}
  initializers_copy = dict(initializers)
  conv1 = snt.Conv1D(output_channels=1, kernel_shape=3, stride=1,
                     name='conv1', initializers=initializers)
  conv1(tf.placeholder(tf.float32, [1, 10, 2]))
  self.assertAllEqual(initializers, initializers_copy)
'Run through for something with a known answer using SAME padding.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testComputationSame(self, use_bias):
  conv1 = snt.Conv1D(
      output_channels=1, kernel_shape=3, stride=1, padding=snt.SAME,
      use_bias=use_bias, name='conv1',
      initializers=create_constant_initializers(1.0, 1.0, use_bias))
  out = conv1(tf.constant(np.ones([1, 5, 1], dtype=np.float32)))
  expected_out = np.asarray([3, 4, 4, 4, 3])
  if not use_bias:
    expected_out -= 1
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    self.assertAllClose(np.reshape(out.eval(), [5]), expected_out)
'Run through for something with a known answer using snt.VALID padding.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testComputationValid(self, use_bias):
  conv1 = snt.Conv1D(
      output_channels=1, kernel_shape=3, stride=1, padding=snt.VALID,
      use_bias=use_bias, name='conv1',
      initializers=create_constant_initializers(1.0, 1.0, use_bias))
  out = conv1(tf.constant(np.ones([1, 5, 1], dtype=np.float32)))
  expected_out = np.asarray([4, 4, 4])
  if not use_bias:
    expected_out -= 1
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    self.assertAllClose(np.reshape(out.eval(), [3]), expected_out)
'Sharing is working.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testSharing(self, use_bias):
  conv1 = snt.Conv1D(output_channels=1, kernel_shape=3, stride=1,
                     padding=snt.SAME, use_bias=use_bias, name='conv1')
  x = np.random.randn(1, 5, 1)
  x1 = tf.constant(x, dtype=np.float32)
  x2 = tf.constant(x, dtype=np.float32)
  out1 = conv1(x1)
  out2 = conv1(x2)
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    self.assertAllClose(out1.eval(), out2.eval())
    w = np.random.randn(3, 1, 1)
    conv1.w.assign(w).eval()
    self.assertAllClose(out1.eval(), out2.eval())
'Tests that an error is raised if the kernel shape is not specified.'
def testKernelsNotSpecified(self):
  with self.assertRaisesRegexp(ValueError, '`kernel_shape` cannot be None.'):
    snt.Conv1DTranspose(output_channels=1)
'Check functionality with unknown batch size at build time.'
@parameterized.Parameters(
    *zip(out_channels, kernel_shape, padding, use_bias, in_shape, out_shape,
         stride_shape))
def testMissingBatchSize(self, out_channels, kernel_shape, padding, use_bias,
                         in_shape, out_shape, stride_shape):
  conv1 = snt.Conv1DTranspose(
      output_channels=out_channels, output_shape=out_shape,
      kernel_shape=kernel_shape, padding=padding, stride=stride_shape,
      name='conv1', use_bias=use_bias)
  image = tf.placeholder(tf.float32, shape=(None,) + in_shape[1:])
  output = conv1(image)
  self.assertTrue(output.get_shape().is_compatible_with(
      [None, out_shape, out_channels]))
  with self.test_session() as sess:
    tf.global_variables_initializer().run()
    sess.run(output, feed_dict={image: np.zeros((10,) + in_shape[1:])})
'The generated shapes are correct.'
@parameterized.Parameters(
    *zip(batch_size, in_length, in_channels, out_length, out_channels,
         kernel_shape, padding, use_bias, in_shape, out_shape, stride_shape))
def testShapesSame(self, batch_size, in_length, in_channels, out_length,
                   out_channels, kernel_shape, padding, use_bias, in_shape,
                   out_shape, stride_shape):
  inputs = tf.placeholder(tf.float32,
                          shape=[batch_size, in_length, in_channels])
  conv1 = snt.Conv1DTranspose(
      output_channels=out_channels, output_shape=out_shape,
      kernel_shape=kernel_shape, padding=padding, stride=stride_shape,
      name='conv1', use_bias=use_bias)
  output = conv1(inputs)
  self.assertTrue(output.get_shape().is_compatible_with(
      [batch_size, out_length, out_channels]))
  self.assertTrue(conv1.w.get_shape().is_compatible_with(
      [1, kernel_shape, out_channels, in_channels]))
  if use_bias:
    self.assertTrue(conv1.b.get_shape().is_compatible_with([out_channels]))
'Errors are thrown for invalid kernel shapes.'
@parameterized.Parameters(
    *zip(out_channels, padding, use_bias, in_shape, out_shape, stride_shape))
def testKernelShape(self, out_channels, padding, use_bias, in_shape,
                    out_shape, stride_shape):
  snt.Conv1DTranspose(output_channels=out_channels, output_shape=out_shape,
                      kernel_shape=[3], padding=padding, stride=stride_shape,
                      name='conv1', use_bias=use_bias)
  snt.Conv1DTranspose(output_channels=out_channels, output_shape=out_shape,
                      kernel_shape=3, padding=padding, stride=stride_shape,
                      name='conv1', use_bias=use_bias)
  err = 'Invalid kernel'
  with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):
    snt.Conv1DTranspose(output_channels=out_channels,
                        output_shape=out_shape, kernel_shape=[3, 3],
                        name='conv1', use_bias=use_bias)
  with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):
    snt.Conv1DTranspose(output_channels=out_channels,
                        output_shape=out_shape, kernel_shape=[3, 3, 3, 3],
                        name='conv1', use_bias=use_bias)
'Errors are thrown for invalid strides.'
@parameterized.Parameters(
    *zip(out_channels, padding, use_bias, in_shape, out_shape))
def testStrideError(self, out_channels, padding, use_bias, in_shape,
                    out_shape):
  snt.Conv1DTranspose(output_channels=out_channels, output_shape=out_shape,
                      kernel_shape=3, padding=padding, stride=1,
                      name='conv1', use_bias=use_bias)
  err = ('must be either a positive integer or an iterable of positive '
         'integers of size 1')
  with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):
    snt.Conv1DTranspose(output_channels=out_channels,
                        output_shape=out_shape, kernel_shape=3,
                        padding=padding, stride=[1, 1], name='conv1',
                        use_bias=use_bias)
  with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):
    snt.Conv1DTranspose(output_channels=out_channels,
                        output_shape=out_shape, kernel_shape=3,
                        padding=padding, stride=[1, 1, 1, 1], name='conv1',
                        use_bias=use_bias)
'Errors are thrown for invalid input types.'
@parameterized.Parameters(
    *zip(batch_size, in_length, in_channels, out_channels, kernel_shape,
         padding, use_bias, out_shape, stride_shape))
def testInputTypeError(self, batch_size, in_length, in_channels,
                       out_channels, kernel_shape, padding, use_bias,
                       out_shape, stride_shape):
  conv1 = snt.Conv1DTranspose(
      output_channels=out_channels, output_shape=out_shape,
      kernel_shape=kernel_shape, padding=padding, stride=stride_shape,
      name='conv1', use_bias=use_bias)
  for dtype in (tf.float16, tf.float64):
    x = tf.constant(np.ones([batch_size, in_length, in_channels]),
                    dtype=dtype)
    err = 'Input must have dtype tf.float32.*'
    with self.assertRaisesRegexp(TypeError, err):
      conv1(x)
'Sharing is working.'
@parameterized.Parameters(
    *zip(batch_size, in_length, in_channels, out_channels, kernel_shape,
         padding, use_bias, out_shape, stride_shape))
def testSharing(self, batch_size, in_length, in_channels, out_channels,
                kernel_shape, padding, use_bias, out_shape, stride_shape):
  conv1 = snt.Conv1DTranspose(
      output_channels=out_channels, output_shape=out_shape,
      kernel_shape=kernel_shape, padding=padding, stride=stride_shape,
      name='conv1', use_bias=use_bias)
  x = np.random.randn(batch_size, in_length, in_channels)
  x1 = tf.constant(x, dtype=np.float32)
  x2 = tf.constant(x, dtype=np.float32)
  out1 = conv1(x1)
  out2 = conv1(x2)
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    self.assertAllClose(out1.eval(), out2.eval())
    w = np.random.randn(1, kernel_shape, out_channels, in_channels)
    conv1.w.assign(w).eval()
    self.assertAllClose(out1.eval(), out2.eval())
'Test transpose.'
@parameterized.Parameters(
    *zip(batch_size, in_length, in_channels, out_channels, kernel_shape,
         padding, use_bias, out_shape, stride_shape))
def testTranspose(self, batch_size, in_length, in_channels, out_channels,
                  kernel_shape, padding, use_bias, out_shape, stride_shape):
  conv1_transpose = snt.Conv1DTranspose(
      output_channels=out_channels, output_shape=out_shape,
      kernel_shape=kernel_shape, padding=padding, stride=stride_shape,
      name='conv1_transpose', use_bias=use_bias)
  conv1 = conv1_transpose.transpose()
  self.assertEqual(conv1_transpose.kernel_shape, conv1.kernel_shape)
  self.assertEqual((1, conv1_transpose.stride[2], 1), conv1.stride)
  self.assertEqual(conv1_transpose.padding, conv1.padding)
  err = 'Variables in conv1_transpose not instantiated yet'
  with self.assertRaisesRegexp(snt.NotConnectedError, err):
    conv1.output_channels
  x = tf.constant(np.random.randn(batch_size, in_length, in_channels),
                  dtype=np.float32)
  conv1_transpose(x)
  self.assertEqual(in_channels, conv1.output_channels)
  err = 'Variables in conv1_transpose_transpose not instantiated yet'
  with self.assertRaisesRegexp(snt.NotConnectedError, err):
    self.assertEqual(conv1_transpose.output_shape, conv1.input_shape)
'Test that initializers are not mutated.'
def testInitializerMutation(self):
  initializers = {'b': tf.constant_initializer(0)}
  initializers_copy = dict(initializers)
  conv1 = snt.Conv1DTranspose(
      output_shape=(10,), output_channels=1, kernel_shape=3, stride=1,
      name='conv1', initializers=initializers)
  conv1(tf.placeholder(tf.float32, [1, 10, 2]))
  self.assertAllEqual(initializers, initializers_copy)
'Run through for something with a known answer.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testComputation(self, use_bias):
  conv1 = snt.CausalConv1D(
      output_channels=1, kernel_shape=3, stride=1, use_bias=use_bias,
      name='conv1',
      initializers=create_constant_initializers(1.0, 1.0, use_bias))
  out = conv1(tf.constant(np.ones([1, 5, 1], dtype=np.float32)))
  expected_out = np.reshape(np.array([1, 2, 3, 3, 3]), [1, 5, 1])
  if use_bias:
    expected_out += 1
  init_op = tf.variables_initializer(
      [conv1.w, conv1.b] if use_bias else [conv1.w])
  with self.test_session() as sess:
    sess.run(init_op)
    actual_out = sess.run(out)
  self.assertAllClose(actual_out, expected_out)
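Causal convolution pads only on the left, so output t sums inputs t-2..t: the first position sees one real input, the second two, and every later one the full three, giving [1, 2, 3, 3, 3] before the bias.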
'Run through for something with a known answer.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testComputationStrided(self, use_bias):
  conv1 = snt.CausalConv1D(
      output_channels=1, kernel_shape=3, stride=2, use_bias=use_bias,
      name='conv1',
      initializers=create_constant_initializers(1.0, 1.0, use_bias))
  out = conv1(tf.constant(np.ones([1, 5, 1], dtype=np.float32)))
  expected_out = np.reshape(np.array([1, 3, 3]), [1, 3, 1])
  if use_bias:
    expected_out += 1
  init_op = tf.variables_initializer(
      [conv1.w, conv1.b] if use_bias else [conv1.w])
  with self.test_session() as sess:
    sess.run(init_op)
    actual_out = sess.run(out)
  self.assertAllClose(actual_out, expected_out)
'Run through for something with a known answer.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testComputationDilated(self, use_bias):
  conv1 = snt.CausalConv1D(
      output_channels=1, kernel_shape=3, stride=1, rate=2,
      use_bias=use_bias, name='conv1',
      initializers=create_constant_initializers(1.0, 1.0, use_bias))
  out = conv1(tf.constant(np.ones([1, 5, 1], dtype=np.float32)))
  expected_out = np.reshape(np.array([1, 1, 2, 2, 3]), [1, 5, 1])
  if use_bias:
    expected_out += 1
  init_op = tf.variables_initializer(
      [conv1.w, conv1.b] if use_bias else [conv1.w])
  with self.test_session() as sess:
    sess.run(init_op)
    actual_out = sess.run(out)
  self.assertAllClose(actual_out, expected_out)
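With rate=2 the causal taps sit at t-4, t-2 and t, so positions 0 and 1 see one valid input, positions 2 and 3 see two, and only position 4 sees all three: [1, 1, 2, 2, 3] before the bias.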
'Sharing is working.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testSharing(self, use_bias):
  conv1 = snt.CausalConv1D(output_channels=1, kernel_shape=3, stride=1,
                           use_bias=use_bias, name='conv1')
  x = np.random.randn(1, 5, 1)
  x1 = tf.constant(x, dtype=np.float32)
  x2 = tf.constant(x, dtype=np.float32)
  out1 = conv1(x1)
  out2 = conv1(x2)
  w = np.random.randn(3, 1, 1)
  weight_change_op = conv1.w.assign(w)
  init_op = tf.variables_initializer(
      [conv1.w, conv1.b] if use_bias else [conv1.w])
  with self.test_session() as sess:
    sess.run(init_op)
    first_replica_out = sess.run(out1)
    second_replica_out = sess.run(out2)
    sess.run(weight_change_op)
    first_replica_out_changed = sess.run(out1)
    second_replica_out_changed = sess.run(out2)
  self.assertAllClose(first_replica_out, second_replica_out)
  self.assertAllClose(first_replica_out_changed, second_replica_out_changed)
'Test that the number of output and input channels are equal.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testSameNumberOfOutputAndInputChannels(self, use_bias):
  input_channels = random.randint(1, 32)
  inputs = tf.placeholder(tf.float32, shape=[1, 10, 10, input_channels])
  conv1 = snt.InPlaneConv2D(kernel_shape=3, use_bias=use_bias)
  err = 'Variables in in_plane_conv2d not instantiated yet'
  with self.assertRaisesRegexp(snt.NotConnectedError, err):
    _ = conv1.output_channels
  conv1(inputs)
  self.assertEqual(conv1.output_channels, input_channels)
'Sharing is working.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testSharing(self, use_bias):
  conv1 = snt.InPlaneConv2D(kernel_shape=3, use_bias=use_bias)
  x = np.random.randn(1, 5, 5, 1)
  x1 = tf.constant(x, dtype=np.float32)
  x2 = tf.constant(x, dtype=np.float32)
  out1 = conv1(x1)
  out2 = conv1(x2)
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    self.assertAllClose(out1.eval(), out2.eval())
    w = np.random.randn(3, 3, 1, 1)
    conv1.w.assign(w).eval()
    self.assertAllClose(out1.eval(), out2.eval())
'Test that initializers are not mutated.'
def testInitializerMutation(self):
  initializers = {'b': tf.constant_initializer(0)}
  initializers_copy = dict(initializers)
  conv1 = snt.InPlaneConv2D(kernel_shape=3, initializers=initializers)
  conv1(tf.placeholder(tf.float32, [1, 10, 10, 2]))
  self.assertAllEqual(initializers, initializers_copy)
'Set up some variables to re-use in multiple tests.'
def setUp(self):
  super(DepthwiseConv2DTest, self).setUp()
  self.batch_size = batch_size = random.randint(1, 20)
  self.in_height = in_height = random.randint(10, 128)
  self.in_width = in_width = random.randint(10, 128)
  self.in_channels = in_channels = random.randint(1, 10)
  self.kernel_shape_h = kernel_shape_h = random.randint(1, 11)
  self.kernel_shape_w = kernel_shape_w = random.randint(1, 11)
  self.channel_multiplier = channel_multiplier = random.randint(1, 10)
  self.out_channels = out_channels = in_channels * channel_multiplier
  self.input_shape = [batch_size, in_height, in_width, in_channels]
  self.kernel_shape = [kernel_shape_h, kernel_shape_w]
  self.output_shape = [batch_size, in_height, in_width, out_channels]
  self.weight_shape = [kernel_shape_h, kernel_shape_w, in_channels,
                       channel_multiplier]
'Test that the generated shapes are correct with SAME padding.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testShapesSame(self, use_bias):
  out_channels = self.out_channels
  input_shape = self.input_shape
  kernel_shape = self.kernel_shape
  output_shape = self.output_shape
  weight_shape = self.weight_shape
  channel_multiplier = self.channel_multiplier
  inputs = tf.placeholder(tf.float32, shape=input_shape)
  conv1 = snt.DepthwiseConv2D(
      name='conv1', channel_multiplier=channel_multiplier,
      kernel_shape=kernel_shape, padding=snt.SAME, stride=1,
      use_bias=use_bias)
  output = conv1(inputs)
  self.assertEqual(output.get_shape(), output_shape)
  self.assertEqual(conv1.w.get_shape(), weight_shape)
  if use_bias:
    self.assertEqual(conv1.b.get_shape(), out_channels)
'Test that the generated shapes are correct when input shape not known.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testShapesNotKnown(self, use_bias):
  inputs = tf.placeholder(tf.float32,
                          shape=[None, None, None, self.in_channels],
                          name='inputs')
  conv1 = snt.DepthwiseConv2D(
      channel_multiplier=self.channel_multiplier,
      kernel_shape=self.kernel_shape, padding=snt.SAME, stride=1,
      use_bias=use_bias)
  output = conv1(inputs)
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    output_eval = output.eval({inputs: np.zeros(self.input_shape)})
    self.assertEqual(output_eval.shape, tuple(self.output_shape))
'Test that errors are thrown for invalid kernel shapes.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testKernelShape(self, use_bias):
  snt.DepthwiseConv2D(channel_multiplier=1, kernel_shape=[3, 4])
  snt.DepthwiseConv2D(channel_multiplier=1, kernel_shape=3)
  error_msg = ('Invalid kernel shape: x is \\[3], must be either a positive '
               'integer or an iterable of positive integers of size 2')
  with self.assertRaisesRegexp(snt.IncompatibleShapeError, error_msg):
    snt.DepthwiseConv2D(channel_multiplier=1, kernel_shape=[3],
                        use_bias=use_bias, name='conv1')
'Test that errors are thrown for invalid strides.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testStrideError(self, use_bias):
  snt.DepthwiseConv2D(channel_multiplier=1, kernel_shape=3, stride=1,
                      use_bias=use_bias)
  snt.DepthwiseConv2D(channel_multiplier=1, kernel_shape=3, stride=[1] * 2,
                      use_bias=use_bias)
  snt.DepthwiseConv2D(channel_multiplier=1, kernel_shape=3, stride=[1] * 4,
                      use_bias=use_bias)
  error_msg = ('stride is \\[1, 1, 1\\] \\(.*\\), must be either a positive '
               'integer or an iterable of positive integers of size 2')
  with self.assertRaisesRegexp(snt.IncompatibleShapeError, error_msg):
    snt.DepthwiseConv2D(channel_multiplier=3, kernel_shape=3,
                        stride=[1, 1, 1], use_bias=use_bias, name='conv1')
'Test that errors are thrown for invalid input types.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testInputTypeError(self, use_bias):
  conv1 = snt.DepthwiseConv2D(
      channel_multiplier=3, kernel_shape=3, stride=1, padding=snt.SAME,
      use_bias=use_bias,
      initializers=create_constant_initializers(1.0, 1.0, use_bias))
  for dtype in (tf.float16, tf.float64):
    x = tf.constant(np.ones([1, 5, 5, 1]), dtype=dtype)
    err = 'Input must have dtype tf.float32.*'
    with self.assertRaisesRegexp(TypeError, err):
      conv1(x)
'Test that initializers work as expected.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testInitializers(self, use_bias):
  w = random.random()
  b = np.random.randn(6)
  conv1 = snt.DepthwiseConv2D(
      channel_multiplier=3, kernel_shape=3, stride=1, use_bias=use_bias,
      initializers=create_constant_initializers(w, b, use_bias))
  conv1(tf.placeholder(tf.float32, [1, 10, 10, 2]))
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    self.assertAllClose(conv1.w.eval(),
                        np.full([3, 3, 2, 3], w, dtype=np.float32))
    if use_bias:
      self.assertAllClose(conv1.b.eval(), b)
  error_msg = "Initializer for 'w' is not a callable function"
  with self.assertRaisesRegexp(TypeError, error_msg):
    snt.DepthwiseConv2D(channel_multiplier=3, kernel_shape=3, stride=1,
                        use_bias=use_bias, initializers={'w': tf.ones([])})
'Test that initializers are not mutated.'
def testInitializerMutation(self):
  initializers = {'b': tf.constant_initializer(0)}
  initializers_copy = dict(initializers)
  conv1 = snt.DepthwiseConv2D(channel_multiplier=3, kernel_shape=3,
                              stride=1, initializers=initializers)
  conv1(tf.placeholder(tf.float32, [10, 10, 1, 2]))
  self.assertAllEqual(initializers, initializers_copy)
'Run through for something with a known answer using SAME padding.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testComputationSame(self, use_bias):
  conv1 = snt.DepthwiseConv2D(
      channel_multiplier=1, kernel_shape=[3, 3], stride=1, padding=snt.SAME,
      use_bias=use_bias,
      initializers=create_constant_initializers(1.0, 1.0, use_bias))
  out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32)))
  expected_out = np.array([[5, 7, 7, 7, 5],
                           [7, 10, 10, 10, 7],
                           [7, 10, 10, 10, 7],
                           [7, 10, 10, 10, 7],
                           [5, 7, 7, 7, 5]])
  if not use_bias:
    expected_out -= 1
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    self.assertAllClose(np.reshape(out.eval(), [5, 5]), expected_out)
'Run through for something with a known answer using snt.VALID padding.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testComputationValid(self, use_bias):
  conv1 = snt.DepthwiseConv2D(
      channel_multiplier=1, kernel_shape=[3, 3], stride=1,
      padding=snt.VALID, use_bias=use_bias,
      initializers=create_constant_initializers(1.0, 1.0, use_bias))
  out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32)))
  expected_out = np.array([[10, 10, 10],
                           [10, 10, 10],
                           [10, 10, 10]])
  if not use_bias:
    expected_out -= 1
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_out)
'Run through for something with a known answer using snt.VALID padding.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testComputationValidMultiChannel(self, use_bias):
  conv1 = snt.DepthwiseConv2D(
      channel_multiplier=1, kernel_shape=[3, 3], stride=1,
      padding=snt.VALID, use_bias=use_bias,
      initializers=create_constant_initializers(1.0, 1.0, use_bias))
  out = conv1(tf.constant(np.ones([1, 5, 5, 3], dtype=np.float32)))
  expected_out = np.array([[[10] * 3] * 3] * 3)
  if not use_bias:
    expected_out -= 1
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    self.assertAllClose(np.reshape(out.eval(), [3, 3, 3]), expected_out)
'Sharing is working.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testSharing(self, use_bias):
  conv1 = snt.DepthwiseConv2D(channel_multiplier=3, kernel_shape=3,
                              stride=1, padding=snt.SAME, use_bias=use_bias)
  x = np.random.randn(1, 5, 5, 1)
  x1 = tf.constant(x, dtype=np.float32)
  x2 = tf.constant(x, dtype=np.float32)
  out1 = conv1(x1)
  out2 = conv1(x2)
  with self.test_session():
    tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
    self.assertAllClose(out1.eval(), out2.eval())
    w = np.random.randn(3, 3, 1, 3)
    conv1.w.assign(w).eval()
    self.assertAllClose(out1.eval(), out2.eval())
'Set up some variables to re-use in multiple tests.'
def setUp(self):
  super(SeparableConv2DTest, self).setUp()
  self.batch_size = batch_size = random.randint(1, 100)
  self.in_height = in_height = random.randint(10, 188)
  self.in_width = in_width = random.randint(10, 188)
  self.in_channels = in_channels = random.randint(1, 10)
  self.input_shape = [batch_size, in_height, in_width, in_channels]
  self.kernel_shape_h = kernel_shape_h = random.randint(1, 10)
  self.kernel_shape_w = kernel_shape_w = random.randint(1, 10)
  self.channel_multiplier = channel_multiplier = random.randint(1, 10)
  self.kernel_shape = [kernel_shape_h, kernel_shape_w]
  self.out_channels_dw = out_channels_dw = in_channels * channel_multiplier
  self.output_shape = [batch_size, in_height, in_width, out_channels_dw]
  self.depthwise_filter_shape = [kernel_shape_h, kernel_shape_w,
                                 in_channels, channel_multiplier]
  self.pointwise_filter_shape = [1, 1, out_channels_dw, out_channels_dw]
'Test that the generated shapes are correct with SAME padding.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testShapesSame(self, use_bias):
  out_channels = self.out_channels_dw
  input_shape = self.input_shape
  kernel_shape = self.kernel_shape
  output_shape = self.output_shape
  depthwise_filter_shape = self.depthwise_filter_shape
  pointwise_filter_shape = self.pointwise_filter_shape
  channel_multiplier = self.channel_multiplier
  inputs = tf.placeholder(tf.float32, shape=input_shape)
  conv1 = snt.SeparableConv2D(
      output_channels=out_channels, channel_multiplier=channel_multiplier,
      kernel_shape=kernel_shape, padding=snt.SAME, use_bias=use_bias)
  output = conv1(inputs)
  self.assertTrue(output.get_shape().is_compatible_with(output_shape))
  self.assertTrue(conv1.w_dw.get_shape().is_compatible_with(
      depthwise_filter_shape))
  self.assertTrue(conv1.w_pw.get_shape().is_compatible_with(
      pointwise_filter_shape))
  if use_bias:
    self.assertTrue(conv1.b.get_shape().is_compatible_with([out_channels]))
'Test that the generated shapes are correct when input shape not known.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testShapesNotKnown(self, use_bias):
  inputs = tf.placeholder(tf.float32,
                          shape=[None, None, None, self.in_channels],
                          name='inputs')
  conv1 = snt.SeparableConv2D(
      output_channels=self.out_channels_dw, channel_multiplier=1,
      kernel_shape=self.kernel_shape, padding=snt.SAME, use_bias=use_bias)
  output = conv1(inputs)
  with self.test_session():
    tf.variables_initializer(
        [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias
        else [conv1.w_dw, conv1.w_pw]).run()
    output_eval = output.eval({inputs: np.zeros(self.input_shape)})
    self.assertEqual(output_eval.shape, tuple(self.output_shape))
'Test that errors are thrown for invalid kernel shapes.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testKernelShape(self, use_bias):
  snt.SeparableConv2D(output_channels=1, channel_multiplier=2,
                      kernel_shape=[3, 4], name='conv1', use_bias=use_bias)
  snt.SeparableConv2D(output_channels=1, channel_multiplier=1,
                      kernel_shape=3, name='conv1')
  error_msg = ('Invalid kernel shape: x is \\[3], must be either a positive '
               'integer or an iterable of positive integers of size 2')
  with self.assertRaisesRegexp(snt.IncompatibleShapeError, error_msg):
    snt.SeparableConv2D(output_channels=1, channel_multiplier=3,
                        kernel_shape=[3], use_bias=use_bias)
'Test that errors are thrown for invalid strides.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testStrideError(self, use_bias):
  snt.SeparableConv2D(output_channels=1, channel_multiplier=3,
                      kernel_shape=3, stride=1, use_bias=use_bias)
  snt.SeparableConv2D(output_channels=1, channel_multiplier=3,
                      kernel_shape=3, stride=[1, 1], use_bias=use_bias)
  snt.SeparableConv2D(output_channels=1, channel_multiplier=3,
                      kernel_shape=3, stride=[1, 1, 1, 1],
                      use_bias=use_bias)
  error_msg = ('stride is \\[1, 1, 1\\] \\(.*\\), must be either a positive '
               'integer or an iterable of positive integers of size 2')
  with self.assertRaisesRegexp(snt.IncompatibleShapeError, error_msg):
    snt.SeparableConv2D(output_channels=1, channel_multiplier=3,
                        kernel_shape=3, stride=[1, 1, 1], name='conv1',
                        use_bias=use_bias)
'Test that errors are thrown for invalid input types.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testInputTypeError(self, use_bias):
  conv1 = snt.SeparableConv2D(
      output_channels=3, channel_multiplier=1, kernel_shape=3,
      padding=snt.SAME, use_bias=use_bias,
      initializers=create_separable_constant_initializers(
          1.0, 1.0, 1.0, use_bias))
  for dtype in (tf.float16, tf.float64):
    x = tf.constant(np.ones([1, 5, 5, 1]), dtype=dtype)
    err = 'Input must have dtype tf.float32.*'
    with self.assertRaisesRegexp(TypeError, err):
      conv1(x)
'Test that initializers work as expected.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testInitializers(self, use_bias):
w_dw = random.random()
w_pw = random.random()
b = np.random.randn(6)
conv1 = snt.SeparableConv2D(
    output_channels=6,
    channel_multiplier=3,
    kernel_shape=3,
    use_bias=use_bias,
    initializers=create_separable_constant_initializers(
        w_dw, w_pw, b, use_bias))
conv1(tf.placeholder(tf.float32, [1, 10, 10, 2]))
with self.test_session():
  tf.variables_initializer(
      [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias
      else [conv1.w_dw, conv1.w_pw]).run()
  self.assertAllClose(
      conv1.w_dw.eval(), np.full([3, 3, 2, 3], w_dw, dtype=np.float32))
  self.assertAllClose(
      conv1.w_pw.eval(), np.full([1, 1, 6, 6], w_pw, dtype=np.float32))
  if use_bias:
    self.assertAllClose(conv1.b.eval(), b)
error_msg = "Initializer for 'w_dw' is not a callable function"
with self.assertRaisesRegexp(TypeError, error_msg):
  snt.SeparableConv2D(output_channels=3, channel_multiplier=1, kernel_shape=3,
                      stride=1, use_bias=use_bias,
                      initializers={'w_dw': tf.ones([])})
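For reference, the filter shapes asserted above follow the tf.nn.separable_conv2d convention: the depthwise filter is [kernel_h, kernel_w, in_channels, channel_multiplier] and the pointwise filter is [1, 1, in_channels * channel_multiplier, output_channels]. A quick sketch with the test's numbers (illustration only, not part of the suite):

in_channels, channel_multiplier, output_channels = 2, 3, 6
kernel_h = kernel_w = 3
# One kernel_h x kernel_w kernel per (input channel, multiplier) pair.
depthwise_shape = [kernel_h, kernel_w, in_channels, channel_multiplier]  # [3, 3, 2, 3]
# A 1x1 convolution mixing all depthwise outputs into output_channels.
pointwise_shape = [1, 1, in_channels * channel_multiplier, output_channels]  # [1, 1, 6, 6]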
'Test that initializers are not mutated.'
def testInitializerMutation(self):
initializers = {'b': tf.constant_initializer(0)}
initializers_copy = dict(initializers)
conv1 = snt.SeparableConv2D(
    output_channels=3,
    channel_multiplier=1,
    kernel_shape=3,
    stride=1,
    initializers=initializers)
conv1(tf.placeholder(tf.float32, [10, 10, 1, 2]))
self.assertAllEqual(initializers, initializers_copy)
'Run through for something with a known answer using SAME padding.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testComputationSame(self, use_bias):
conv1 = snt.SeparableConv2D(
    output_channels=1,
    channel_multiplier=1,
    kernel_shape=[3, 3],
    padding=snt.SAME,
    name='conv1',
    use_bias=use_bias,
    initializers=create_separable_constant_initializers(
        1.0, 1.0, 1.0, use_bias))
out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32)))
expected_out = np.array([[5, 7, 7, 7, 5],
                         [7, 10, 10, 10, 7],
                         [7, 10, 10, 10, 7],
                         [7, 10, 10, 10, 7],
                         [5, 7, 7, 7, 5]])
if not use_bias:
  expected_out -= 1
with self.test_session():
  tf.variables_initializer(
      [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias
      else [conv1.w_dw, conv1.w_pw]).run()
  self.assertAllClose(np.reshape(out.eval(), [5, 5]), expected_out)
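A back-of-envelope check on the constants above (not part of the original test): with an all-ones input, all-ones filters, and a bias of 1, the depthwise stage produces at each position the count of input pixels under the 3x3 window, the 1x1 pointwise stage with weight 1 passes that through, and the bias adds 1. Corners see 4 pixels (giving 5), edges 6 (giving 7), and interior positions 9 (giving 10). Assuming scipy is available, the same table can be generated directly:

import numpy as np
from scipy.signal import convolve2d

# Count of input pixels under each 3x3 window, SAME padding.
counts = convolve2d(np.ones((5, 5)), np.ones((3, 3)), mode='same')
print(counts + 1)  # Matches expected_out in the use_bias=True case.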
'Run through for something with a known answer using snt.VALID padding.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testComputationValid(self, use_bias):
conv1 = snt.SeparableConv2D(
    output_channels=1,
    channel_multiplier=1,
    kernel_shape=[3, 3],
    padding=snt.VALID,
    use_bias=use_bias,
    initializers=create_separable_constant_initializers(
        1.0, 1.0, 1.0, use_bias))
out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32)))
expected_out = np.array([[10, 10, 10],
                         [10, 10, 10],
                         [10, 10, 10]])
if not use_bias:
  expected_out -= 1
with self.test_session():
  tf.variables_initializer(
      [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias
      else [conv1.w_dw, conv1.w_pw]).run()
  self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_out)
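The VALID case follows from the same arithmetic: every 3x3 window that fits entirely inside the 5x5 all-ones input covers exactly 9 pixels, so each of the 3x3 outputs is 9 + 1 = 10 with the bias (or 9 without).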
'Run through for something with a known answer using snt.VALID padding and multiple input channels.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testComputationValidMultiChannel(self, use_bias):
conv1 = snt.SeparableConv2D(
    output_channels=3,
    channel_multiplier=1,
    kernel_shape=[3, 3],
    padding=snt.VALID,
    use_bias=use_bias,
    initializers=create_separable_constant_initializers(
        1.0, 1.0, 1.0, use_bias))
out = conv1(tf.constant(np.ones([1, 5, 5, 3], dtype=np.float32)))
expected_out = np.array([[[28] * 3] * 3] * 3)
if not use_bias:
  expected_out -= 1
with self.test_session():
  tf.variables_initializer(
      [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias
      else [conv1.w_dw, conv1.w_pw]).run()
  self.assertAllClose(np.reshape(out.eval(), [3, 3, 3]), expected_out)
'Run through for something with a known answer using snt.VALID padding and channel_multiplier > 1.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testComputationValidChannelMultiplier(self, use_bias):
input_channels = 3
channel_multiplier = 5
output_channels = input_channels * channel_multiplier
conv1 = snt.SeparableConv2D(
    output_channels=output_channels,
    channel_multiplier=channel_multiplier,
    kernel_shape=[3, 3],
    padding=snt.VALID,
    use_bias=use_bias,
    initializers=create_separable_constant_initializers(
        1.0, 1.0, 1.0, use_bias))
input_data = np.ones([1, 5, 5, input_channels], dtype=np.float32)
out = conv1(tf.constant(input_data))
expected_out = np.ones((3, 3, output_channels)) * 136
if not use_bias:
  expected_out -= 1
self.assertTrue(
    out.get_shape().is_compatible_with([1, 3, 3, output_channels]))
with self.test_session():
  tf.variables_initializer(
      [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias
      else [conv1.w_dw, conv1.w_pw]).run()
  self.assertAllClose(
      np.reshape(out.eval(), [3, 3, output_channels]), expected_out)
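The multi-channel expectations can be derived the same way (illustrative arithmetic, not from the test file): the depthwise stage yields in_channels * channel_multiplier feature maps whose interior entries are all 9, and the all-ones pointwise filter sums them before the bias is added:

in_channels, window = 3, 9  # 3x3 all-ones window over an all-ones input
# testComputationValidMultiChannel: channel_multiplier = 1.
print(in_channels * 1 * window + 1)   # 28
# testComputationValidChannelMultiplier: channel_multiplier = 5.
print(in_channels * 5 * window + 1)   # 136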
'Sharing is working.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testSharing(self, use_bias):
conv1 = snt.SeparableConv2D(
    output_channels=3, channel_multiplier=3, kernel_shape=3,
    use_bias=use_bias)
x = np.random.randn(1, 5, 5, 1)
x1 = tf.constant(x, dtype=np.float32)
x2 = tf.constant(x, dtype=np.float32)
out1 = conv1(x1)
out2 = conv1(x2)
with self.test_session():
  tf.variables_initializer(
      [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias
      else [conv1.w_dw, conv1.w_pw]).run()
  self.assertAllClose(out1.eval(), out2.eval())
  # Both connections share the same variables, so reassigning the filters
  # changes both outputs identically.
  w_dw = np.random.randn(3, 3, 1, 3)
  w_pw = np.random.randn(1, 1, 3, 3)
  conv1.w_dw.assign(w_dw).eval()
  conv1.w_pw.assign(w_pw).eval()
  self.assertAllClose(out1.eval(), out2.eval())
'The generated shapes are correct with SAME padding.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testShapesSame(self, use_bias):
batch_size = random.randint(1, 100)
in_depth = random.randint(10, 288)
in_height = random.randint(10, 288)
in_width = random.randint(10, 288)
in_channels = random.randint(1, 10)
out_channels = random.randint(1, 32)
kernel_shape_d = random.randint(1, 11)
kernel_shape_h = random.randint(1, 11)
kernel_shape_w = random.randint(1, 11)
inputs = tf.placeholder(
    tf.float32,
    shape=[batch_size, in_depth, in_height, in_width, in_channels])
conv1 = snt.Conv3D(
    output_channels=out_channels,
    kernel_shape=[kernel_shape_d, kernel_shape_h, kernel_shape_w],
    padding=snt.SAME,
    stride=1,
    use_bias=use_bias,
    name='conv1')
output = conv1(inputs)
self.assertTrue(output.get_shape().is_compatible_with(
    [batch_size, in_depth, in_height, in_width, out_channels]))
self.assertTrue(conv1.w.get_shape().is_compatible_with(
    [kernel_shape_d, kernel_shape_h, kernel_shape_w, in_channels,
     out_channels]))
if use_bias:
  self.assertTrue(conv1.b.get_shape().is_compatible_with([out_channels]))
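As a reminder of why the spatial dimensions are preserved here (a sketch of TensorFlow's SAME-padding arithmetic, assuming default dilation): the output size along each dimension is ceil(input_size / stride), independent of the kernel size, so a stride of 1 leaves depth, height, and width unchanged.

import math

def same_output_size(input_size, stride):
  # TensorFlow SAME padding: kernel size does not affect the output size.
  return math.ceil(input_size / stride)

assert same_output_size(32, 1) == 32
assert same_output_size(32, 2) == 16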
'The generated shapes are correct when input shape not known.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testShapesWithUnknownInputShape(self, use_bias):
batch_size = 5
in_depth = in_height = in_width = 32
in_channels = out_channels = 5
kernel_shape_d = kernel_shape_h = kernel_shape_w = 3
inputs = tf.placeholder(
    tf.float32, shape=[None, None, None, None, in_channels], name='inputs')
conv1 = snt.Conv3D(
    name='conv1',
    output_channels=out_channels,
    kernel_shape=[kernel_shape_d, kernel_shape_h, kernel_shape_w],
    padding=snt.SAME,
    stride=1,
    use_bias=use_bias)
output = conv1(inputs)
with self.test_session():
  tf.variables_initializer(
      [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
  output_eval = output.eval({
      inputs: np.zeros(
          [batch_size, in_depth, in_height, in_width, in_channels])})
  self.assertEqual(
      output_eval.shape,
      (batch_size, in_depth, in_height, in_width, out_channels))
'Errors are thrown for invalid kernel shapes.'
def testKernelShape(self):
snt.Conv3D(output_channels=10, kernel_shape=[3, 4, 5], name='conv1')
snt.Conv3D(output_channels=10, kernel_shape=3, name='conv1')
with self.assertRaisesRegexp(snt.Error, 'Invalid kernel shape.*'):
  snt.Conv3D(output_channels=10, kernel_shape=[3, 3], name='conv1')
with self.assertRaisesRegexp(snt.Error, 'Invalid kernel shape.*'):
  snt.Conv3D(output_channels=10, kernel_shape=[3, 3, 3, 3], name='conv1')
'Errors are thrown for invalid strides.'
def testStrideError(self):
snt.Conv3D(output_channels=10, kernel_shape=3, stride=1, name='conv1')
snt.Conv3D(output_channels=10, kernel_shape=3, stride=[1, 1, 1], name='conv1')
snt.Conv3D(output_channels=10, kernel_shape=3, stride=[1, 1, 1, 1, 1],
           name='conv1')
with self.assertRaisesRegexp(snt.Error, 'Invalid stride.*'):
  snt.Conv3D(output_channels=10, kernel_shape=3, stride=[1, 1], name='conv1')
with self.assertRaisesRegexp(snt.Error, 'Invalid stride.*'):
  snt.Conv3D(output_channels=10, kernel_shape=3, stride=[1, 1, 1, 1],
             name='conv1')
'Errors are thrown for invalid dilation rates.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testRateError(self, use_bias):
snt.Conv3D(output_channels=10, kernel_shape=3, rate=1, name='conv1',
           use_bias=use_bias)
snt.Conv3D(output_channels=10, kernel_shape=3, rate=2, name='conv1',
           use_bias=use_bias)
for rate in [0, 0.5, -1]:
  with self.assertRaisesRegexp(snt.IncompatibleShapeError,
                               'Invalid rate shape*'):
    snt.Conv3D(output_channels=10, kernel_shape=3, rate=rate, name='conv1')
'Errors are thrown for stride > 1 when using atrous convolution.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testRateAndStrideError(self, use_bias):
err = 'Cannot have stride > 1 with rate > 1'
with self.assertRaisesRegexp(snt.NotSupportedError, err):
  snt.Conv3D(output_channels=10, kernel_shape=3, stride=2, rate=2,
             name='conv1', use_bias=use_bias)
with self.assertRaisesRegexp(snt.NotSupportedError, err):
  snt.Conv3D(output_channels=10, kernel_shape=3, stride=[2, 2, 1], rate=2,
             name='conv1', use_bias=use_bias)
'Errors are thrown for invalid input types.'
def testInputTypeError(self):
conv1 = snt.Conv3D(
    output_channels=1,
    kernel_shape=3,
    stride=1,
    padding=snt.SAME,
    name='conv1',
    initializers={'w': tf.constant_initializer(1.0),
                  'b': tf.constant_initializer(1.0)})
for dtype in (tf.float16, tf.float64):
  x = tf.constant(np.ones([1, 5, 5, 5, 1]), dtype=dtype)
  self.assertRaisesRegexp(TypeError, 'Input must have dtype tf.float32.*',
                          conv1, x)
'Test initializers work as expected.'
@parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testInitializers(self, use_bias):
w = random.random()
b = random.random()
conv1 = snt.Conv3D(
    output_channels=1,
    kernel_shape=3,
    stride=1,
    name='conv1',
    use_bias=use_bias,
    initializers=create_constant_initializers(w, b, use_bias))
conv1(tf.placeholder(tf.float32, [1, 10, 10, 10, 2]))
with self.test_session():
  tf.variables_initializer(
      [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
  self.assertAllClose(
      conv1.w.eval(), np.full([3, 3, 3, 2, 1], w, dtype=np.float32))
  if use_bias:
    self.assertAllClose(conv1.b.eval(), [b])
with self.assertRaises(TypeError):
  snt.Conv3D(output_channels=10, kernel_shape=3, stride=1, name='conv1',
             initializers={'w': tf.ones([])})