Dataset columns: desc (string, 3 to 26.7k characters), decl (string, 11 to 7.89k characters), bodies (string, 8 to 553k characters).
'Module constructor. Args: idx: Indexes of the tensors to select. If `idx` is an integer, then a `Tensor` is returned. If `idx` is a (nested) list/tuple, then a (nested) tuple of `Tensor`s is returned. name: Name of the module. Raises: TypeError: If `idx` is not a list, tuple or integer.'
def __init__(self, idx, name='select_input'):
super(SelectInput, self).__init__(name=name)
self._check_type(idx)
self._idx = idx
'Connects the module into the graph. Args: *inputs: `Tensor` variables to select. Returns: Subset of `inputs` in an arbitrarily nested configuration. Raises: ValueError: If any entry of `idx` is out of bounds with respect to the size of `inputs`.'
def _build(self, *inputs):
return self._select(inputs, self._idx)
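A minimal usage sketch of `SelectInput` (assuming the Sonnet 1 `snt.SelectInput` API; the placeholders and shapes are hypothetical). With a nested `idx`, the module returns the selected inputs rearranged into the same nesting:

import tensorflow as tf
import sonnet as snt

a = tf.placeholder(tf.float32, [None, 3])
b = tf.placeholder(tf.float32, [None, 4])
c = tf.placeholder(tf.float32, [None, 5])

# idx may be nested: this instance returns the tuple (a, (c, b)).
select = snt.SelectInput(idx=[0, [2, 1]], name='select_input')
outputs = select(a, b, c)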
'Test the minimum input size calculator.'
def testCalcMinSize(self):
net = snt.nets.AlexNet(mode=snt.nets.AlexNet.MINI)
self.assertEqual(net._calc_min_size([(None, (3, 1), None)]), 3)
self.assertEqual(net._calc_min_size([(None, (3, 1), (3, 2))]), 5)
self.assertEqual(net._calc_min_size([(None, (3, 1), (3, 2)),
                                     (None, (3, 2), (5, 2))]), 25)
'Test that each mode can be instantiated.'
def testModes(self):
modes = [snt.nets.AlexNet.FULL, snt.nets.AlexNet.HALF, snt.nets.AlexNet.MINI]
keep_prob = tf.placeholder(tf.float32)
for mode in modes:
  net = snt.nets.AlexNet(name='net_{}'.format(mode), mode=mode)
  input_shape = [None, net._min_size, net._min_size, 3]
  inputs = tf.placeholder(tf.float32, shape=input_shape)
  net(inputs, keep_prob, is_training=True)
'Test that batch norm can be instantiated.'
def testBatchNorm(self):
net = snt.nets.AlexNet(mode=snt.nets.AlexNet.FULL, use_batch_norm=True)
input_shape = [net._min_size, net._min_size, 3]
inputs = tf.placeholder(tf.float32, shape=[None] + input_shape)
output = net(inputs, is_training=True)
with self.test_session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(output, feed_dict={inputs: np.random.rand(10, *input_shape)})
err = 'is_training flag must be explicitly specified'
with self.assertRaisesRegexp(ValueError, err):
  net(inputs)
is_training = tf.placeholder(tf.bool)
test_local_stats = tf.placeholder(tf.bool)
net(inputs, is_training=is_training, test_local_stats=test_local_stats)
net(inputs, is_training=False, test_local_stats=False)
variance_name = 'alex_net/batch_norm/moving_variance:0'
mean_name = 'alex_net/batch_norm/moving_mean:0'
var_names = [var.name for var in tf.global_variables()]
self.assertIn(variance_name, var_names)
self.assertIn(mean_name, var_names)
'An exception should be raised if trying to use dropout when testing.'
def testNoDropoutInTesting(self):
net = snt.nets.AlexNet(mode=snt.nets.AlexNet.FULL)
input_shape = [net._min_size, net._min_size, 3]
inputs = tf.placeholder(tf.float32, shape=[None] + input_shape)
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
output = net(inputs, keep_prob, is_training=False)
with self.test_session() as sess:
  sess.run(tf.global_variables_initializer())
  # Using keep_prob < 1 at test time must raise.
  with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, 'keep_prob'):
    sess.run(output, feed_dict={inputs: np.random.rand(10, *input_shape),
                                keep_prob: 0.7})
  # keep_prob == 1 is the only value allowed when is_training=False.
  sess.run(output, feed_dict={inputs: np.random.rand(10, *input_shape),
                              keep_prob: 1.0})
'Check that an error is raised if the input image is too small.'
def testInputTooSmall(self):
keep_prob = tf.placeholder(tf.float32)
net = snt.nets.AlexNet(mode=snt.nets.AlexNet.FULL)
input_shape = [None, net._min_size, net._min_size, 1]
inputs = tf.placeholder(tf.float32, shape=input_shape)
net(inputs, keep_prob, is_training=True)
with self.assertRaisesRegexp(snt.IncompatibleShapeError,
                             'Image shape too small: (.*?, .*?) < .*?'):
  input_shape = [None, net._min_size - 1, net._min_size - 1, 1]
  inputs = tf.placeholder(tf.float32, shape=input_shape)
  net(inputs, keep_prob, is_training=True)
'Check that the correct number of variables are made when sharing.'
def testSharing(self):
net = snt.nets.AlexNet(mode=snt.nets.AlexNet.MINI)
inputs1 = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
inputs2 = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
keep_prob1 = tf.placeholder(tf.float32)
keep_prob2 = tf.placeholder(tf.float32)
net(inputs1, keep_prob1, is_training=True)
net(inputs2, keep_prob2, is_training=True)
self.assertEqual(len(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)), 7 * 2)
model_variables = net.get_variables()
self.assertEqual(len(model_variables), 7 * 2)
'Initialize Dilation module and test images. Args: num_output_classes: int. Number of output classes the dilation module should predict per pixel. depth: None or int. Input depth of image. If None, same as num_output_classes.'
def setUpWithNumOutputClasses(self, num_output_classes, depth=None):
self._num_output_classes = num_output_classes
self._model_size = 'basic'
self._module = snt.nets.Dilation(num_output_classes=self._num_output_classes,
                                 model_size=self._model_size)
self._batch_size = 1
self._height = self._width = 5
self._depth = depth or num_output_classes
self._rng = np.random.RandomState(0)
self._images = np.abs(self._rng.randn(self._batch_size, self._height,
                                      self._width, self._depth).astype(np.float32))
'Constructs a `ConvNet2D` module. By default, neither batch normalization nor activation are applied to the output of the final layer. Args: output_channels: Iterable of output channels, as defined in `conv.Conv2D`. Output channels can be defined either as a number or via a callable. In the latter case, since the function invocation is deferred to graph construction time, the user need only ensure that entries can be called when build is called. Each entry in the iterable defines properties in the corresponding convolutional layer. kernel_shapes: Iterable of kernel sizes as defined in `conv.Conv2D`; if the list contains one element only, the same kernel shape is used in each layer of the network. strides: Iterable of kernel strides as defined in `conv.Conv2D`; if the list contains one element only, the same stride is used in each layer of the network. paddings: Iterable of padding options, either `snt.SAME` or `snt.VALID`; if the iterable contains one element only, the same padding is used in each layer of the network. activation: An activation op. activate_final: Boolean determining if the activation and batch normalization, if turned on, are applied to the final layer. initializers: Optional dict containing ops to initialize the filters of the whole network (with key \'w\') or biases (with key \'b\'). partitioners: Optional dict containing partitioners to partition weights (with key \'w\') or biases (with key \'b\'). As a default, no partitioners are used. regularizers: Optional dict containing regularizers for the filters of the whole network (with key \'w\') or biases (with key \'b\'). As a default, no regularizers are used. A regularizer should be a function that takes a single `Tensor` as an input and returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in `tf.contrib.layers`. use_batch_norm: Boolean determining if batch normalization is applied after convolution. use_bias: Boolean or iterable of booleans determining whether to include bias parameters in the convolutional layers. Default `True`. batch_norm_config: Optional mapping of additional configuration for the `snt.BatchNorm` modules. name: Name of the module. Raises: TypeError: If `output_channels` is not iterable; or if `kernel_shapes` is not iterable; or if `strides` is not iterable; or if `paddings` is not iterable; or if `activation` is not callable; or if `batch_norm_config` is not a mapping (e.g. `dict`). ValueError: If `output_channels` is empty; or if `kernel_shapes` does not have length 1 or `len(output_channels)`; or if `strides` does not have length 1 or `len(output_channels)`; or if `paddings` does not have length 1 or `len(output_channels)`. KeyError: If `initializers`, `partitioners` or `regularizers` contain any keys other than \'w\' or \'b\'. TypeError: If any of the given initializers, partitioners or regularizers are not callable.'
def __init__(self, output_channels, kernel_shapes, strides, paddings, activation=tf.nn.relu, activate_final=False, initializers=None, partitioners=None, regularizers=None, use_batch_norm=False, use_bias=True, batch_norm_config=None, name='conv_net_2d'):
if not isinstance(output_channels, collections.Iterable):
  raise TypeError('output_channels must be iterable')
output_channels = tuple(output_channels)
if not isinstance(kernel_shapes, collections.Iterable):
  raise TypeError('kernel_shapes must be iterable')
kernel_shapes = tuple(kernel_shapes)
if not isinstance(strides, collections.Iterable):
  raise TypeError('strides must be iterable')
strides = tuple(strides)
if not isinstance(paddings, collections.Iterable):
  raise TypeError('paddings must be iterable')
paddings = tuple(paddings)
super(ConvNet2D, self).__init__(name=name)
if not output_channels:
  raise ValueError('output_channels must not be empty')
self._output_channels = tuple(output_channels)
self._num_layers = len(self._output_channels)
self._input_shape = None
self._initializers = util.check_initializers(initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._partitioners = util.check_partitioners(partitioners, self.POSSIBLE_INITIALIZER_KEYS)
self._regularizers = util.check_regularizers(regularizers, self.POSSIBLE_INITIALIZER_KEYS)
if not callable(activation):
  raise TypeError("Input 'activation' must be callable")
self._activation = activation
self._activate_final = activate_final
self._kernel_shapes = _replicate_elements(kernel_shapes, self._num_layers)
if len(self._kernel_shapes) != self._num_layers:
  raise ValueError('kernel_shapes must be of length 1 or len(output_channels)')
self._strides = _replicate_elements(strides, self._num_layers)
if len(self._strides) != self._num_layers:
  raise ValueError('strides must be of length 1 or len(output_channels)')
self._paddings = _replicate_elements(paddings, self._num_layers)
if len(self._paddings) != self._num_layers:
  raise ValueError('paddings must be of length 1 or len(output_channels)')
self._use_batch_norm = use_batch_norm
if batch_norm_config is not None:
  if not isinstance(batch_norm_config, collections.Mapping):
    raise TypeError('`batch_norm_config` must be a mapping, e.g. `dict`.')
  self._batch_norm_config = batch_norm_config
else:
  self._batch_norm_config = {}
if isinstance(use_bias, bool):
  use_bias = (use_bias,)
else:
  if not isinstance(use_bias, collections.Iterable):
    raise TypeError('use_bias must be either a bool or an iterable')
  use_bias = tuple(use_bias)
self._use_bias = _replicate_elements(use_bias, self._num_layers)
self._instantiate_layers()
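For orientation, a minimal construction-and-connection sketch (assuming the Sonnet 1 `snt.nets.ConvNet2D` API; the placeholder shape and channel counts are illustrative only). Note how single-element iterables are broadcast to every layer, as the docstring above describes:

import tensorflow as tf
import sonnet as snt

# Three conv layers; the one-element kernel_shapes/strides/paddings
# are replicated across all three layers.
net = snt.nets.ConvNet2D(output_channels=[16, 32, 64],
                         kernel_shapes=[[3, 3]],
                         strides=[1],
                         paddings=[snt.SAME],
                         activate_final=False)
images = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
outputs = net(images)  # [None, 28, 28, 64]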
'Instantiates all the convolutional modules used in the network.'
def _instantiate_layers(self):
with self._enter_variable_scope():
  self._layers = tuple(
      conv.Conv2D(name='conv_2d_{}'.format(i),
                  output_channels=self._output_channels[i],
                  kernel_shape=self._kernel_shapes[i],
                  stride=self._strides[i],
                  padding=self._paddings[i],
                  use_bias=self._use_bias[i],
                  initializers=self._initializers,
                  partitioners=self._partitioners,
                  regularizers=self._regularizers)
      for i in xrange(self._num_layers))
'Assembles the `ConvNet2D` and connects it to the graph. Args: inputs: A 4D Tensor of shape `[batch_size, input_height, input_width, input_channels]`. is_training: Boolean to indicate to `snt.BatchNorm` if we are currently training. Must be specified explicitly if `use_batchnorm` is `True`. test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch normalization should use local batch statistics at test time. By default `True`. Returns: A 4D Tensor of shape `[batch_size, output_height, output_width, output_channels[-1]]`. Raises: ValueError: If `is_training` is not explicitly specified when using batch normalization.'
def _build(self, inputs, is_training=None, test_local_stats=True):
if self._use_batch_norm and is_training is None:
  raise ValueError('Boolean is_training flag must be explicitly specified '
                   'when using batch normalization.')
self._input_shape = tuple(inputs.get_shape().as_list())
net = inputs
final_index = len(self._layers) - 1
for i, layer in enumerate(self._layers):
  net = layer(net)
  if i != final_index or self._activate_final:
    if self._use_batch_norm:
      bn = batch_norm.BatchNorm(name='batch_norm_{}'.format(i),
                                **self._batch_norm_config)
      net = bn(net, is_training=is_training, test_local_stats=test_local_stats)
    net = self._activation(net)
return net
'Returns a tuple containing the convolutional layers of the network.'
@property
def layers(self):
return self._layers
'Returns shape of input `Tensor` passed at last call to `build`.'
@property
def input_shape(self):
self._ensure_is_connected()
return self._input_shape
'Returns transposed version of this network. Args: transpose_constructor: A method that creates an instance of the transposed network type. The method must accept the same kwargs as this method, with the exception of the `transpose_constructor` argument. name: Optional string specifying the name of the transposed module. The default name is constructed by appending "_transpose" to `self.module_name`. output_channels: Optional iterable of numbers of output channels. kernel_shapes: Optional iterable of kernel sizes. The default value is constructed by reversing `self.kernel_shapes`. strides: Optional iterable of kernel strides. The default value is constructed by reversing `self.strides`. paddings: Optional iterable of padding options, either `snt.SAME` or `snt.VALID`; the default value is constructed by reversing `self.paddings`. activation: Optional activation op. Default value is `self.activation`. activate_final: Optional boolean determining if the activation and batch normalization, if turned on, are applied to the final layer. initializers: Optional dict containing ops to initialize the filters of the whole network (with key \'w\') or biases (with key \'b\'). The default value is `self.initializers`. partitioners: Optional dict containing partitioners to partition weights (with key \'w\') or biases (with key \'b\'). The default value is `self.partitioners`. regularizers: Optional dict containing regularizers for the filters of the whole network (with key \'w\') or biases (with key \'b\'). The default is `self.regularizers`. use_batch_norm: Optional boolean determining if batch normalization is applied after convolution. The default value is `self.use_batch_norm`. use_bias: Optional boolean or iterable of booleans determining whether to include bias parameters in the convolutional layers. Default is constructed by reversing `self.use_bias`. batch_norm_config: Optional mapping of additional configuration for the `snt.BatchNorm` modules. Default is `self.batch_norm_config`. Returns: Matching transposed module. Raises: ValueError: If output_channels is specified and its length does not match the number of layers.'
def _transpose(self, transpose_constructor, name=None, output_channels=None, kernel_shapes=None, strides=None, paddings=None, activation=None, activate_final=None, initializers=None, partitioners=None, regularizers=None, use_batch_norm=None, use_bias=None, batch_norm_config=None):
if output_channels is None:
  output_channels = []
  for layer in reversed(self._layers):
    output_channels.append(lambda l=layer: l.input_shape[-1])
elif len(output_channels) != len(self._layers):
  raise ValueError('Iterable output_channels length must match the number of '
                   'layers ({}), but is {} instead.'.format(
                       len(self._layers), len(output_channels)))
if kernel_shapes is None:
  kernel_shapes = reversed(self.kernel_shapes)
if strides is None:
  strides = reversed(self.strides)
if paddings is None:
  paddings = reversed(self.paddings)
if activation is None:
  activation = self.activation
if activate_final is None:
  activate_final = self.activate_final
if initializers is None:
  initializers = self.initializers
if partitioners is None:
  partitioners = self.partitioners
if regularizers is None:
  regularizers = self.regularizers
if use_batch_norm is None:
  use_batch_norm = self.use_batch_norm
if use_bias is None:
  use_bias = reversed(self.use_bias)
if batch_norm_config is None:
  batch_norm_config = self.batch_norm_config
if name is None:
  name = self.module_name + '_transpose'
return transpose_constructor(output_channels=output_channels,
                             kernel_shapes=kernel_shapes,
                             strides=strides,
                             paddings=paddings,
                             activation=activation,
                             activate_final=activate_final,
                             initializers=initializers,
                             partitioners=partitioners,
                             regularizers=regularizers,
                             use_batch_norm=use_batch_norm,
                             use_bias=use_bias,
                             batch_norm_config=batch_norm_config,
                             name=name)
'Returns transposed version of this network. Args: name: Optional string specifying the name of the transposed module. The default name is constructed by appending "_transpose" to `self.module_name`. output_channels: Optional iterable of numbers of output channels. kernel_shapes: Optional iterable of kernel sizes. The default value is constructed by reversing `self.kernel_shapes`. strides: Optional iterable of kernel strides. The default value is constructed by reversing `self.strides`. paddings: Optional iterable of padding options, either `snt.SAME` or `snt.VALID`; The default value is constructed by reversing `self.paddings`. activation: Optional activation op. Default value is `self.activation`. activate_final: Optional boolean determining if the activation and batch normalization, if turned on, are applied to the final layer. initializers: Optional dict containing ops to initialize the filters of the whole network (with key \'w\') or biases (with key \'b\'). The default value is `self.initializers`. partitioners: Optional dict containing partitioners to partition weights (with key \'w\') or biases (with key \'b\'). The default value is `self.partitioners`. regularizers: Optional dict containing regularizers for the filters of the whole network (with key \'w\') or biases (with key \'b\'). The default is `self.regularizers`. use_batch_norm: Optional boolean determining if batch normalization is applied after convolution. The default value is `self.use_batch_norm`. use_bias: Optional boolean or iterable of booleans determining whether to include bias parameters in the convolutional layers. Default is constructed by reversing `self.use_bias`. batch_norm_config: Optional mapping of additional configuration for the `snt.BatchNorm` modules. Default is `self.batch_norm_config`. Returns: Matching `ConvNet2DTranspose` module. Raises: ValueError: If output_channels is specified and its length does not match the number of layers.'
def transpose(self, name=None, output_channels=None, kernel_shapes=None, strides=None, paddings=None, activation=None, activate_final=None, initializers=None, partitioners=None, regularizers=None, use_batch_norm=None, use_bias=None, batch_norm_config=None):
output_shapes = []
for layer in reversed(self._layers):
  output_shapes.append(lambda l=layer: l.input_shape[1:-1])
transpose_constructor = functools.partial(ConvNet2DTranspose,
                                          output_shapes=output_shapes)
return self._transpose(transpose_constructor=transpose_constructor,
                       name=name,
                       output_channels=output_channels,
                       kernel_shapes=kernel_shapes,
                       strides=strides,
                       paddings=paddings,
                       activation=activation,
                       activate_final=activate_final,
                       initializers=initializers,
                       partitioners=partitioners,
                       regularizers=regularizers,
                       use_batch_norm=use_batch_norm,
                       use_bias=use_bias,
                       batch_norm_config=batch_norm_config)
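A short sketch of why the deferred lambdas above matter in practice (assuming the Sonnet 1 API; shapes and names are illustrative). The transposed network can be constructed before the original is connected, because its output channels and shapes are only resolved when it is itself connected, after the original has recorded its input shapes:

import tensorflow as tf
import sonnet as snt

encoder = snt.nets.ConvNet2D(output_channels=[16, 32],
                             kernel_shapes=[[3, 3]],
                             strides=[2],
                             paddings=[snt.SAME])
decoder = encoder.transpose()  # constructed now, shapes resolved lazily

images = tf.placeholder(tf.float32, shape=[None, 32, 32, 1])
latents = encoder(images)          # encoder records its per-layer input shapes
reconstruction = decoder(latents)  # lambdas now return the recorded shapes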
'Constructs a `ConvNet2DTranspose` module. `output_{shapes,channels}` can be defined either as iterable of {iterables,integers} or via a callable. In the latter case, since the function invocation is deferred to graph construction time, the user need only ensure that entries can be called, returning meaningful values, when build is called. Each entry in the iterable defines properties in the corresponding convolutional layer. By default, neither batch normalization nor activation are applied to the output of the final layer. Args: output_channels: Iterable of numbers of output channels. output_shapes: Iterable of output shapes as defined in `conv.Conv2DTranspose`; if the iterable contains one element only, the same shape is used in each layer of the network. kernel_shapes: Iterable of kernel sizes as defined in `conv.Conv2D`; if the list contains one element only, the same kernel shape is used in each layer of the network. strides: Iterable of kernel strides as defined in `conv.Conv2D`; if the list contains one element only, the same stride is used in each layer of the network. paddings: Iterable of padding options, either `snt.SAME` or `snt.VALID`; if the iterable contains one element only, the same padding is used in each layer of the network. activation: An activation op. activate_final: Boolean determining if the activation and batch normalization, if turned on, are applied to the final layer. initializers: Optional dict containing ops to initialize the filters of the whole network (with key \'w\') or biases (with key \'b\'). partitioners: Optional dict containing partitioners to partition weights (with key \'w\') or biases (with key \'b\'). As a default, no partitioners are used. regularizers: Optional dict containing regularizers for the filters of the whole network (with key \'w\') or biases (with key \'b\'). As a default, no regularizers are used. A regularizer should be a function that takes a single `Tensor` as an input and returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in `tf.contrib.layers`. use_batch_norm: Boolean determining if batch normalization is applied after convolution. use_bias: Boolean or iterable of booleans determining whether to include bias parameters in the convolutional layers. Default `True`. batch_norm_config: Optional mapping of additional configuration for the `snt.BatchNorm` modules. name: Name of the module. Raises: TypeError: If `output_channels` is not iterable; or if `output_shapes` is not iterable; or if `kernel_shapes` is not iterable; or if `strides` is not iterable; or if `paddings` is not iterable; or if `activation` is not callable. ValueError: If `output_channels` is empty; or if `kernel_shapes` does not have length 1 or `len(output_channels)`; or if `strides` does not have length 1 or `len(output_channels)`; or if `paddings` does not have length 1 or `len(output_channels)`. KeyError: If `initializers`, `partitioners` or `regularizers` contain any keys other than \'w\' or \'b\'. TypeError: If any of the given initializers, partitioners or regularizers are not callable.'
def __init__(self, output_channels, output_shapes, kernel_shapes, strides, paddings, activation=tf.nn.relu, activate_final=False, initializers=None, partitioners=None, regularizers=None, use_batch_norm=False, use_bias=True, batch_norm_config=None, name='conv_net_2d_transpose'):
if not isinstance(output_channels, collections.Iterable):
  raise TypeError('output_channels must be iterable')
output_channels = tuple(output_channels)
num_layers = len(output_channels)
if not isinstance(output_shapes, collections.Iterable):
  raise TypeError('output_shapes must be iterable')
output_shapes = tuple(output_shapes)
self._output_shapes = _replicate_elements(output_shapes, num_layers)
if len(self._output_shapes) != num_layers:
  raise ValueError('output_shapes must be of length 1 or len(output_channels)')
super(ConvNet2DTranspose, self).__init__(output_channels,
                                         kernel_shapes,
                                         strides,
                                         paddings,
                                         activation=activation,
                                         activate_final=activate_final,
                                         initializers=initializers,
                                         partitioners=partitioners,
                                         regularizers=regularizers,
                                         use_batch_norm=use_batch_norm,
                                         use_bias=use_bias,
                                         batch_norm_config=batch_norm_config,
                                         name=name)
'Instantiates all the convolutional modules used in the network.'
def _instantiate_layers(self):
with self._enter_variable_scope():
  self._layers = tuple(
      conv.Conv2DTranspose(name='conv_2d_transpose_{}'.format(i),
                           output_channels=self._output_channels[i],
                           output_shape=self._output_shapes[i],
                           kernel_shape=self._kernel_shapes[i],
                           stride=self._strides[i],
                           padding=self._paddings[i],
                           initializers=self._initializers,
                           partitioners=self._partitioners,
                           regularizers=self._regularizers,
                           use_bias=self._use_bias[i])
      for i in xrange(self._num_layers))
'Returns transposed version of this network. Args: name: Optional string specifying the name of the transposed module. The default name is constructed by appending "_transpose" to `self.module_name`. output_channels: Optional iterable of numbers of output channels. kernel_shapes: Optional iterable of kernel sizes. The default value is constructed by reversing `self.kernel_shapes`. strides: Optional iterable of kernel strides. The default value is constructed by reversing `self.strides`. paddings: Optional iterable of padding options, either `snt.SAME` or `snt.VALID`; The default value is constructed by reversing `self.paddings`. activation: Optional activation op. Default value is `self.activation`. activate_final: Optional boolean determining if the activation and batch normalization, if turned on, are applied to the final layer. initializers: Optional dict containing ops to initialize the filters of the whole network (with key \'w\') or biases (with key \'b\'). The default value is `self.initializers`. partitioners: Optional dict containing partitioners to partition weights (with key \'w\') or biases (with key \'b\'). The default value is `self.partitioners`. regularizers: Optional dict containing regularizers for the filters of the whole network (with key \'w\') or biases (with key \'b\'). The default is `self.regularizers`. use_batch_norm: Optional boolean determining if batch normalization is applied after convolution. The default value is `self.use_batch_norm`. use_bias: Optional boolean or iterable of booleans determining whether to include bias parameters in the convolutional layers. Default is constructed by reversing `self.use_bias`. batch_norm_config: Optional mapping of additional configuration for the `snt.BatchNorm` modules. Default is `self.batch_norm_config`. Returns: Matching `ConvNet2D` module. Raises: ValueError: If output_channels is specified and its length does not match the number of layers.'
def transpose(self, name=None, output_channels=None, kernel_shapes=None, strides=None, paddings=None, activation=None, activate_final=None, initializers=None, partitioners=None, regularizers=None, use_batch_norm=None, use_bias=None, batch_norm_config=None):
return self._transpose(transpose_constructor=ConvNet2D, name=name, output_channels=output_channels, kernel_shapes=kernel_shapes, strides=strides, paddings=paddings, activation=activation, activate_final=activate_final, initializers=initializers, partitioners=partitioners, regularizers=regularizers, use_batch_norm=use_batch_norm, use_bias=use_bias, batch_norm_config=batch_norm_config)
'Constructs AlexNet. Args: mode: Construction mode of network: `AlexNet.FULL`, `AlexNet.HALF` or `AlexNet.MINI`. use_batch_norm: Whether to use batch normalization between the output of a layer and the activation function. batch_norm_config: Optional mapping of additional configuration for the `snt.BatchNorm` modules. initializers: Optional dict containing ops to initialize the filters (with key \'w\') or biases (with key \'b\'). The default initializers are truncated normal initializers, which are commonly used when the inputs are zero centered (see https://arxiv.org/pdf/1502.03167v3.pdf). partitioners: Optional dict containing partitioners for the filters (with key \'w\') and the biases (with key \'b\'). As a default, no partitioners are used. regularizers: Optional dict containing regularizers for the filters (with key \'w\') and the biases (with key \'b\'). As a default, no regularizers are used. A regularizer should be a function that takes a single `Tensor` as an input and returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in `tf.contrib.layers`. name: Name of the module. Raises: base.Error: If the given `mode` is not one of `AlexNet.FULL`, `AlexNet.HALF` or `AlexNet.MINI`. TypeError: If `batch_norm_config` is not a mapping, e.g. `dict`. KeyError: If `initializers` contains any keys other than \'w\' or \'b\'. KeyError: If `partitioners` contains any keys other than \'w\' or \'b\'. KeyError: If `regularizers` contains any keys other than \'w\' or \'b\'.'
def __init__(self, mode=HALF, use_batch_norm=False, batch_norm_config=None, initializers=None, partitioners=None, regularizers=None, name='alex_net'):
super(AlexNet, self).__init__(name=name)
self._mode = mode
self._use_batch_norm = use_batch_norm
if batch_norm_config is not None:
  if not isinstance(batch_norm_config, collections.Mapping):
    raise TypeError('`batch_norm_config` must be a mapping, e.g. `dict`.')
  self._batch_norm_config = batch_norm_config
else:
  self._batch_norm_config = {}
# Each conv layer is (output_channels, (kernel_size, stride),
# (pooling_size, pooling_stride)), with None meaning no pooling.
if self._mode == self.HALF:
  self._conv_layers = [(48, (11, 4), (3, 2)),
                       (128, (5, 1), (3, 2)),
                       (192, (3, 1), None),
                       (192, (3, 1), None),
                       (128, (3, 1), (3, 2))]
  self._fc_layers = [2048, 2048]
elif self._mode == self.FULL:
  self._conv_layers = [(96, (11, 4), (3, 2)),
                       (256, (5, 1), (3, 2)),
                       (384, (3, 1), None),
                       (384, (3, 1), None),
                       (256, (3, 1), (3, 2))]
  self._fc_layers = [4096, 4096]
elif self._mode == self.MINI:
  self._conv_layers = [(48, (3, 1), (3, 1)),
                       (128, (3, 1), (3, 1)),
                       (192, (3, 1), None),
                       (192, (3, 1), None),
                       (128, (3, 1), (3, 1))]
  self._fc_layers = [1024, 1024]
else:
  raise base.Error("AlexNet construction mode '{}' not recognised, must be "
                   "one of: '{}', '{}', '{}'".format(mode, self.HALF,
                                                     self.FULL, self.MINI))
self._min_size = self._calc_min_size(self._conv_layers)
self._conv_modules = []
self._linear_modules = []
self.possible_keys = self.POSSIBLE_INITIALIZER_KEYS
self._initializers = util.check_initializers(initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._partitioners = util.check_partitioners(partitioners, self.POSSIBLE_INITIALIZER_KEYS)
self._regularizers = util.check_regularizers(regularizers, self.POSSIBLE_INITIALIZER_KEYS)
'Calculates the minimum size of the input layer. Given a set of convolutional layers, calculate the minimum value of the `input_height` and `input_width`, i.e. such that the output has size 1x1. Assumes snt.VALID padding. Args: conv_layers: List of tuples `(output_channels, (kernel_size, stride), (pooling_size, pooling_stride))` Returns: Minimum value of input height and width.'
def _calc_min_size(self, conv_layers):
input_size = 1
for _, conv_params, max_pooling in reversed(conv_layers):
  if max_pooling is not None:
    kernel_size, stride = max_pooling
    input_size = input_size * stride + (kernel_size - stride)
  if conv_params is not None:
    kernel_size, stride = conv_params
    input_size = input_size * stride + (kernel_size - stride)
return input_size
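To make the recurrence concrete, here is a hand-worked check matching the second `testCalcMinSize` case above (the intermediate variable `s` is just for illustration): starting from an output size of 1 and walking the layers in reverse, each (kernel, stride) pair maps a size s to s * stride + (kernel - stride).

# Layers: [(None, (3, 1), (3, 2)), (None, (3, 2), (5, 2))], walked in reverse.
s = 1
s = s * 2 + (5 - 2)   # pooling (5, 2) of the last layer   -> 5
s = s * 2 + (3 - 2)   # conv (3, 2) of the last layer      -> 11
s = s * 2 + (3 - 2)   # pooling (3, 2) of the first layer  -> 23
s = s * 1 + (3 - 1)   # conv (3, 1) of the first layer     -> 25
assert s == 25        # matches net._calc_min_size(...) in testCalcMinSize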
'Connects the AlexNet module into the graph. The is_training flag only controls the batch norm settings; when it is `False`, dropout is not automatically disabled by overriding any input `keep_prob`. To avoid any confusion this may cause, an error is thrown if `is_training=False` and `keep_prob` would cause dropout to be applied. Args: inputs: A Tensor of size [batch_size, input_height, input_width, input_channels], representing a batch of input images. keep_prob: A scalar Tensor representing the dropout keep probability. When `is_training=False` this must be None or 1 to give no dropout. is_training: Boolean to indicate if we are currently training. Must be specified if batch normalization or dropout is used. test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch normalization should use local batch statistics at test time. By default `True`. Returns: A Tensor of size [batch_size, output_size], where `output_size` depends on the mode the network was constructed in. Raises: base.IncompatibleShapeError: If any of the input image dimensions (input_height, input_width) are too small for the given network mode. ValueError: If `keep_prob` is not None or 1 when `is_training=False`. ValueError: If `is_training` is not explicitly specified when using batch normalization.'
def _build(self, inputs, keep_prob=None, is_training=None, test_local_stats=True):
if (self._use_batch_norm or keep_prob is not None) and is_training is None:
  raise ValueError('Boolean is_training flag must be explicitly specified '
                   'when using batch normalization or dropout.')
input_shape = inputs.get_shape().as_list()
if input_shape[1] < self._min_size or input_shape[2] < self._min_size:
  raise base.IncompatibleShapeError(
      'Image shape too small: ({:d}, {:d}) < {:d}'.format(
          input_shape[1], input_shape[2], self._min_size))
net = inputs
if keep_prob is not None:
  valid_inputs = tf.logical_or(is_training, tf.equal(keep_prob, 1.0))
  keep_prob_check = tf.assert_equal(
      valid_inputs, True,
      message='Input `keep_prob` must be None or 1 if `is_training=False`.')
  with tf.control_dependencies([keep_prob_check]):
    net = tf.identity(net)
for i, params in enumerate(self._conv_layers):
  output_channels, conv_params, max_pooling = params
  kernel_size, stride = conv_params
  conv_mod = conv.Conv2D(name='conv_{}'.format(i),
                         output_channels=output_channels,
                         kernel_shape=kernel_size,
                         stride=stride,
                         padding=conv.VALID,
                         initializers=self._initializers,
                         partitioners=self._partitioners,
                         regularizers=self._regularizers)
  if not self.is_connected:
    self._conv_modules.append(conv_mod)
  net = conv_mod(net)
  if self._use_batch_norm:
    bn = batch_norm.BatchNorm(**self._batch_norm_config)
    net = bn(net, is_training, test_local_stats)
  net = tf.nn.relu(net)
  if max_pooling is not None:
    pooling_kernel_size, pooling_stride = max_pooling
    net = tf.nn.max_pool(net,
                         ksize=[1, pooling_kernel_size, pooling_kernel_size, 1],
                         strides=[1, pooling_stride, pooling_stride, 1],
                         padding=conv.VALID)
net = basic.BatchFlatten(name='flatten')(net)
for i, output_size in enumerate(self._fc_layers):
  linear_mod = basic.Linear(name='fc_{}'.format(i),
                            output_size=output_size,
                            partitioners=self._partitioners)
  if not self.is_connected:
    self._linear_modules.append(linear_mod)
  net = linear_mod(net)
  if self._use_batch_norm:
    bn = batch_norm.BatchNorm(**self._batch_norm_config)
    net = bn(net, is_training, test_local_stats)
  net = tf.nn.relu(net)
  if keep_prob is not None:
    net = tf.nn.dropout(net, keep_prob=keep_prob)
return net
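A minimal connection sketch, assuming the Sonnet 1 `snt.nets.AlexNet` API (the placeholder shapes and the suggested keep probability are illustrative only):

import tensorflow as tf
import sonnet as snt

net = snt.nets.AlexNet(mode=snt.nets.AlexNet.HALF)
keep_prob = tf.placeholder(tf.float32)  # feed e.g. 0.5 when training, 1.0 at test
size = net.min_input_size               # smallest height/width the mode accepts
inputs = tf.placeholder(tf.float32, shape=[None, size, size, 3])
features = net(inputs, keep_prob=keep_prob, is_training=True)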
'Returns integer specifying the minimum width and height for the input. Note that the input can be non-square, but both the width and height must be >= this number in size. Returns: The minimum size as an integer.'
@property
def min_input_size(self):
return self._min_size
'Returns list containing convolutional modules of network. Returns: A list containing the Conv2D modules.'
@property
def conv_modules(self):
self._ensure_is_connected()
return self._conv_modules
'Returns list containing linear modules of network. Returns: A list containing the Linear modules.'
@property
def linear_modules(self):
self._ensure_is_connected()
return self._linear_modules
'Creates a dilation module. Args: num_output_classes: Int. Number of output classes to predict for each pixel in an image. initializers: Optional dict containing ops to initialize filters (with key \'w\') or biases (with key \'b\'). The default initializer makes this module equivalent to the identity. regularizers: Optional dict containing regularizers for the weights (with key \'w\') or biases (with key \'b\'). As a default, no regularizers are used. A regularizer should be a function that takes a single `Tensor` as an input and returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in `tf.contrib.layers`. model_size: string. One of \'basic\' or \'large\'. name: string. Name of module.'
def __init__(self, num_output_classes, initializers=None, regularizers=None, model_size='basic', name='dilation'):
super(Dilation, self).__init__(name=name)
self._num_output_classes = num_output_classes
self._model_size = model_size
self._initializers = util.check_initializers(initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._regularizers = util.check_regularizers(regularizers, self.POSSIBLE_INITIALIZER_KEYS)
'Build dilation module. Args: images: Tensor of shape [batch_size, height, width, depth] and dtype float32. Represents a set of images with an arbitrary depth. Note that when using the default initializer, depth must equal num_output_classes. Returns: Tensor of shape [batch_size, height, width, num_output_classes] and dtype float32. Represents, for each image and pixel, logits for per-class predictions. Raises: IncompatibleShapeError: If images is not rank 4. ValueError: If model_size is not one of \'basic\' or \'large\'.'
def _build(self, images):
num_classes = self._num_output_classes
if len(images.get_shape()) != 4:
  raise base.IncompatibleShapeError(
      "'images' must have shape [batch_size, height, width, depth].")
if self.WEIGHTS not in self._initializers:
  if self._model_size == self.BASIC:
    self._initializers[self.WEIGHTS] = identity_kernel_initializer
  elif self._model_size == self.LARGE:
    self._initializers[self.WEIGHTS] = noisy_identity_kernel_initializer(num_classes)
  else:
    raise ValueError('Unrecognized model_size: %s' % self._model_size)
if self.BIASES not in self._initializers:
  self._initializers[self.BIASES] = tf.zeros_initializer()
if self._model_size == self.BASIC:
  self._conv_modules = [
      self._dilated_conv_layer(num_classes, 1, True, 'conv1'),
      self._dilated_conv_layer(num_classes, 1, True, 'conv2'),
      self._dilated_conv_layer(num_classes, 2, True, 'conv3'),
      self._dilated_conv_layer(num_classes, 4, True, 'conv4'),
      self._dilated_conv_layer(num_classes, 8, True, 'conv5'),
      self._dilated_conv_layer(num_classes, 16, True, 'conv6'),
      self._dilated_conv_layer(num_classes, 1, True, 'conv7'),
      self._dilated_conv_layer(num_classes, 1, False, 'conv8'),
  ]
elif self._model_size == self.LARGE:
  self._conv_modules = [
      self._dilated_conv_layer(2 * num_classes, 1, True, 'conv1'),
      self._dilated_conv_layer(2 * num_classes, 1, True, 'conv2'),
      self._dilated_conv_layer(4 * num_classes, 2, True, 'conv3'),
      self._dilated_conv_layer(8 * num_classes, 4, True, 'conv4'),
      self._dilated_conv_layer(16 * num_classes, 8, True, 'conv5'),
      self._dilated_conv_layer(32 * num_classes, 16, True, 'conv6'),
      self._dilated_conv_layer(32 * num_classes, 1, True, 'conv7'),
      self._dilated_conv_layer(num_classes, 1, False, 'conv8'),
  ]
else:
  raise ValueError('Unrecognized model_size: %s' % self._model_size)
dilation_mod = sequential.Sequential(self._conv_modules, name='dilation')
return dilation_mod(images)
'Create a dilated convolution layer. Args: output_channels: int. Number of output channels for each pixel. dilation_rate: int. Represents how many pixels each stride offset will move. A value of 1 indicates a standard convolution. apply_relu: bool. If True, a ReLU non-linearity is added. name: string. Name for layer. Returns: A Sonnet Module for a dilated convolution.'
def _dilated_conv_layer(self, output_channels, dilation_rate, apply_relu, name):
layer_components = [
    conv.Conv2D(output_channels,
                [3, 3],
                initializers=self._initializers,
                regularizers=self._regularizers,
                rate=dilation_rate,
                name='dilated_conv_' + name),
]
if apply_relu:
  layer_components.append(lambda net: tf.nn.relu(net, name='relu_' + name))
return sequential.Sequential(layer_components, name=name)
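As a rough aside (not part of the library), the dilation rates listed for the \'basic\' model above imply the following receptive-field arithmetic; each 3x3 convolution with dilation rate r adds 2*r pixels to the receptive field while keeping the spatial resolution unchanged:

# Back-of-the-envelope receptive-field check for the 'basic' context module.
rates = [1, 1, 2, 4, 8, 16, 1, 1]            # conv1 .. conv8
receptive_field = 1 + sum(2 * r for r in rates)
print(receptive_field)                        # 69: context grows roughly
                                              # exponentially with depth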
'Tests for regressions in variable names.'
def testVariableMap(self):
use_bias = True
var_names_w = [u'mlp/linear_0/w:0', u'mlp/linear_1/w:0', u'mlp/linear_2/w:0']
var_names_b = [u'mlp/linear_0/b:0', u'mlp/linear_1/b:0', u'mlp/linear_2/b:0']
correct_variable_names = set(var_names_w + var_names_b)
mlp = snt.nets.MLP(name=self.module_name,
                   output_sizes=self.output_sizes,
                   activate_final=False,
                   use_bias=use_bias)
input_shape = [10, 100]
input_to_net = tf.placeholder(tf.float32, shape=input_shape)
_ = mlp(input_to_net)
variable_names = [var.name for var in mlp.get_variables()]
self.assertEqual(set(variable_names), set(correct_variable_names))
'Constructs an MLP module. Args: output_sizes: An iterable of output dimensionalities as defined in `basic.Linear`. Output size can be defined either as number or via a callable. In the latter case, since the function invocation is deferred to graph construction time, the user must only ensure that entries can be called when build is called. Each entry in the iterable defines properties in the corresponding linear layer. activation: An activation op. The activation is applied to intermediate layers, and optionally to the output of the final layer. activate_final: Boolean determining if the activation is applied to the output of the final layer. Default `False`. initializers: Optional dict containing ops to initialize the linear layers\' weights (with key \'w\') or biases (with key \'b\'). partitioners: Optional dict containing partitioners to partition the linear layers\' weights (with key \'w\') or biases (with key \'b\'). regularizers: Optional dict containing regularizers for the linear layers\' weights (with key \'w\') and the biases (with key \'b\'). As a default, no regularizers are used. A regularizer should be a function that takes a single `Tensor` as an input and returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in `tf.contrib.layers`. use_bias: Whether to include bias parameters in the linear layers. Default `True`. custom_getter: Callable or dictionary of callables to use as custom getters inside the module. If a dictionary, the keys correspond to regexes to match variable names. See the `tf.get_variable` documentation for information about the custom_getter API. name: Name of the module. Raises: KeyError: If initializers contains any keys other than \'w\' or \'b\'. KeyError: If regularizers contains any keys other than \'w\' or \'b\'. ValueError: If output_sizes is empty. TypeError: If `activation` is not callable; or if `output_sizes` is not iterable.'
def __init__(self, output_sizes, activation=tf.nn.relu, activate_final=False, initializers=None, partitioners=None, regularizers=None, use_bias=True, custom_getter=None, name='mlp'):
super(MLP, self).__init__(custom_getter=custom_getter, name=name)
if not isinstance(output_sizes, collections.Iterable):
  raise TypeError('output_sizes must be iterable')
output_sizes = tuple(output_sizes)
if not output_sizes:
  raise ValueError('output_sizes must not be empty')
self._output_sizes = output_sizes
self._num_layers = len(self._output_sizes)
self._input_shape = None
self.possible_keys = self.get_possible_initializer_keys(use_bias=use_bias)
self._initializers = util.check_initializers(initializers, self.possible_keys)
self._partitioners = util.check_partitioners(partitioners, self.possible_keys)
self._regularizers = util.check_regularizers(regularizers, self.possible_keys)
if not callable(activation):
  raise TypeError("Input 'activation' must be callable")
self._activation = activation
self._activate_final = activate_final
self._use_bias = use_bias
self._instantiate_layers()
'Instantiates all the linear modules used in the network. Layers are instantiated in the constructor, as opposed to the build function, because MLP implements the Transposable interface, and the transpose function can be called before the module is actually connected to the graph and build is called. Notice that this is safe since layers in the transposed module are instantiated using a lambda returning input_size of the mlp layers, and this doesn\'t have to return sensible values until the original module is connected to the graph.'
def _instantiate_layers(self):
with self._enter_variable_scope():
  self._layers = [basic.Linear(self._output_sizes[i],
                               name='linear_{}'.format(i),
                               initializers=self._initializers,
                               partitioners=self._partitioners,
                               regularizers=self._regularizers,
                               use_bias=self.use_bias)
                  for i in xrange(self._num_layers)]
'Assembles the `MLP` and connects it to the graph. Args: inputs: A 2D Tensor of size `[batch_size, input_size]`. Returns: A 2D Tensor of size `[batch_size, output_sizes[-1]]`.'
def _build(self, inputs):
self._input_shape = tuple(inputs.get_shape().as_list())
net = inputs
final_index = self._num_layers - 1
for layer_id in xrange(self._num_layers):
  net = self._layers[layer_id](net)
  if final_index != layer_id or self._activate_final:
    net = self._activation(net)
return net
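For orientation, a minimal usage sketch of the module (assuming the Sonnet 1 `snt.nets.MLP` API; the layer sizes are arbitrary):

import tensorflow as tf
import sonnet as snt

mlp = snt.nets.MLP(output_sizes=[128, 64, 10], activate_final=False)
inputs = tf.placeholder(tf.float32, shape=[None, 784])
logits = mlp(inputs)  # [None, 10]; ReLU applied between layers only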
'Returns a tuple containing the linear layers of the `MLP`.'
@property
def layers(self):
return self._layers
'Returns a tuple of all output sizes of all the layers.'
@property
def output_sizes(self):
return tuple([(l() if callable(l) else l) for l in self._output_sizes])
'Returns the size of the module output, not including the batch dimension. This allows the MLP to be used inside a DeepRNN. Returns: The scalar size of the module output.'
@property
def output_size(self):
last_size = self._output_sizes[-1]
return last_size() if callable(last_size) else last_size
'Returns the initializers dictionary.'
@property
def initializers(self):
return self._initializers
'Returns the partitioners dictionary.'
@property
def partitioners(self):
return self._partitioners
'Returns the regularizers dictionary.'
@property
def regularizers(self):
return self._regularizers
'Returns shape of input `Tensor` passed at last call to `build`.'
@property
def input_shape(self):
self._ensure_is_connected()
return self._input_shape
'Returns transposed `MLP`. Args: name: Optional string specifying the name of the transposed module. The default name is constructed by appending "_transpose" to `self.module_name`. activate_final: Optional boolean determining if the activation and batch normalization, if turned on, are applied to the final layer. Returns: Matching transposed `MLP` module.'
def transpose(self, name=None, activate_final=None):
if name is None:
  name = self.module_name + '_transpose'
if activate_final is None:
  activate_final = self.activate_final
output_sizes = [lambda l=layer: l.input_shape[1] for layer in self._layers]
output_sizes.reverse()
return MLP(name=name,
           output_sizes=output_sizes,
           activation=self.activation,
           activate_final=activate_final,
           initializers=self.initializers,
           partitioners=self.partitioners,
           regularizers=self.regularizers,
           use_bias=self.use_bias)
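A small sketch of the intended transpose workflow, e.g. for a tied-shape autoencoder (assuming the Sonnet 1 API; the names and sizes are illustrative only):

import tensorflow as tf
import sonnet as snt

encoder = snt.nets.MLP(output_sizes=[256, 64], name='encoder')
decoder = encoder.transpose(name='decoder')  # output sizes resolved lazily

inputs = tf.placeholder(tf.float32, shape=[None, 784])
code = encoder(inputs)          # [None, 64]; records per-layer input sizes
reconstruction = decoder(code)  # [None, 784]; mirror of the encoder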
'Tests if .transpose correctly chooses the default parameters. Args: module: The conv net class. param_name: The name of the parameter to test.'
@parameterized.Parameters(*itertools.product(
    [snt.nets.ConvNet2D,
     partial(snt.nets.ConvNet2DTranspose, output_shapes=[[100, 100]])],
    ['kernel_shapes', 'strides', 'paddings', 'activation', 'initializers',
     'partitioners', 'regularizers', 'use_bias', 'batch_norm_config']))
def testTransposeDefaultParameter(self, module, param_name):
expected_reversed = ['kernel_shapes', 'strides', 'paddings', 'use_bias']
model = module(output_channels=[2, 3, 4],
               kernel_shapes=[[3, 3], [5, 5], [7, 7]],
               strides=[[1, 1], [2, 2], [3, 3]],
               paddings=[snt.SAME, snt.SAME, snt.VALID],
               use_batch_norm=[True, True, False],
               use_bias=[True, True, False])
transpose_model = model.transpose()
if param_name in expected_reversed:
  self.assertItemsEqual(reversed(getattr(model, param_name)),
                        getattr(transpose_model, param_name))
else:
  self.assertEqual(getattr(model, param_name),
                   getattr(transpose_model, param_name))
'Tests if .transpose correctly passes through the given parameters. Args: module: The conv net class. param_name_and_value: Tuple consisting of the parameter name and value.'
@parameterized.Parameters(*itertools.product(
    [snt.nets.ConvNet2D,
     partial(snt.nets.ConvNet2DTranspose, output_shapes=[[100, 100]])],
    [('kernel_shapes', [[3, 3], [3, 3], [3, 3]]),
     ('strides', [[1, 1], [1, 1], [1, 1]]),
     ('paddings', [snt.SAME, snt.SAME, snt.SAME]),
     ('activation', tf.nn.tanh),
     ('initializers', {}),
     ('partitioners', {}),
     ('regularizers', {}),
     ('use_bias', [True, True, True]),
     ('batch_norm_config', {'scale': True})]))
def testTransposePassThroughParameter(self, module, param_name_and_value):
param_name, param_value = param_name_and_value
model = module(output_channels=[2, 3, 4],
               kernel_shapes=[[3, 3], [5, 5], [7, 7]],
               strides=[[1, 1], [2, 2], [3, 3]],
               paddings=[snt.SAME, snt.SAME, snt.VALID],
               use_batch_norm=[True, True, False],
               use_bias=[True, True, False])
transpose_model = model.transpose(**{param_name: param_value})
if isinstance(param_value, collections.Iterable):
  self.assertItemsEqual(param_value, getattr(transpose_model, param_name))
else:
  self.assertEqual(param_value, getattr(transpose_model, param_name))
'Tests for regressions in variable names.'
def testVariableMap(self):
use_bias = True
use_batch_norm = True
var_names_w = [u'conv_net_2d/conv_2d_0/w:0',
               u'conv_net_2d/conv_2d_1/w:0',
               u'conv_net_2d/conv_2d_2/w:0']
var_names_b = [u'conv_net_2d/conv_2d_0/b:0',
               u'conv_net_2d/conv_2d_1/b:0',
               u'conv_net_2d/conv_2d_2/b:0']
var_names_bn = [u'conv_net_2d/batch_norm_0/beta:0',
                u'conv_net_2d/batch_norm_1/beta:0']
correct_variable_names = set(var_names_w + var_names_b + var_names_bn)
module = snt.nets.ConvNet2D(output_channels=self.output_channels,
                            kernel_shapes=self.kernel_shapes,
                            strides=self.strides,
                            paddings=self.paddings,
                            use_bias=use_bias,
                            use_batch_norm=use_batch_norm)
input_shape = [10, 100, 100, 3]
input_to_net = tf.placeholder(tf.float32, shape=input_shape)
_ = module(input_to_net, is_training=True)
variable_names = [var.name for var in module.get_variables()]
self.assertEqual(set(variable_names), correct_variable_names)
'Returns concrete test functions for a test and a list of parameters. The naming_type is used to determine the name of the concrete functions as reported by the unittest framework. If naming_type is _FIRST_ARG, the testcases must be tuples, and the first element must have a string representation that is a valid Python identifier. Args: test_method: The decorated test method. testcases: (list of tuple/dict) A list of parameter tuples/dicts for individual test invocations. naming_type: The test naming type, either _NAMED or _ARGUMENT_REPR.'
def __init__(self, test_method, testcases, naming_type):
self._test_method = test_method
self.testcases = testcases
self._naming_type = naming_type
self.__name__ = _ParameterizedTestIter.__name__
'Returns the descriptive ID of the test. This is used internally by the unittesting framework to get a name for the test to be used in reports. Returns: The test id.'
def id(self):
return ('%s.%s%s' % (_StrClass(self.__class__), self._OriginalName(), self._id_suffix.get(self._testMethodName, '')))
'Creates a TokenDataSource instance. Args: data_file: file object containing text data to be tokenized. vocab_data_file: file object containing text data used to initialize the vocabulary.'
def __init__(self, data_file, vocab_data_file):
def reading_function(f):
  return list(f.read().replace('\n', self.CHAR_EOS))

self._vocab_dict = {}
self._inv_vocab_dict = {}
token_list = reading_function(vocab_data_file)
self.vocab_size = 0
for token in self.DEFAULT_START_TOKENS + token_list:
  if token not in self._vocab_dict:
    self._vocab_dict[token] = self.vocab_size
    self._inv_vocab_dict[self.vocab_size] = token
    self.vocab_size += 1
raw_data = reading_function(data_file)
self.flat_data = np.array(self.tokenize(raw_data), dtype=np.int32)
self.num_tokens = self.flat_data.shape[0]
'Produces the list of integer indices corresponding to a token list.'
def tokenize(self, token_list):
return [self._vocab_dict.get(token, self._vocab_dict[self.UNK]) for token in token_list]
'Produces a human-readable representation of the token list.'
def decode(self, token_list):
return ''.join([self._inv_vocab_dict[token] for token in token_list])
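A tiny round-trip sketch of these two helpers (the file paths are hypothetical; see `TinyShakespeareDataset` below for how the real resource files are located):

data_source = TokenDataSource(data_file=open('ts.train.txt'),
                              vocab_data_file=open('ts.train.txt'))
ids = data_source.tokenize(list('To be'))  # integer ids; values depend on the vocab
text = data_source.decode(ids)             # 'To be'
# Characters missing from the vocabulary fall back to the UNK token in tokenize().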
'Initializes a TinyShakespeare sequence data object. Args: num_steps: sequence_length. batch_size: batch size. subset: \'train\', \'valid\' or \'test\'. random: boolean indicating whether to do random sampling of sequences. Default is false (sequential sampling). dtype: type of generated tensors (both observations and targets). name: object name. Raises: ValueError: if subset is not train, valid or test.'
def __init__(self, num_steps=1, batch_size=1, subset='train', random=False, dtype=tf.float32, name='tiny_shakespeare_dataset'):
if subset not in [self.TRAIN, self.VALID, self.TEST]:
  raise ValueError('subset should be %s, %s, or %s. Received %s instead.'
                   % (self.TRAIN, self.VALID, self.TEST, subset))
super(TinyShakespeareDataset, self).__init__(name=name)
self._vocab_file = gfile.Open(os.path.join(self._RESOURCE_ROOT, 'ts.train.txt'))
self._data_file = gfile.Open(os.path.join(self._RESOURCE_ROOT, 'ts.{}.txt'.format(subset)))
self._num_steps = num_steps
self._batch_size = batch_size
self._random_sampling = random
self._dtype = dtype
self._data_source = TokenDataSource(data_file=self._data_file,
                                    vocab_data_file=self._vocab_file)
self._vocab_size = self._data_source.vocab_size
self._flat_data = self._data_source.flat_data
self._n_flat_elements = self._data_source.num_tokens
self._num_batches = self._n_flat_elements // (self._num_steps * batch_size)
self._reset_head_indices()
self._queue_capacity = 10
'Returns a batch of sequences. Returns: obs: np.int32 array of size [Time, Batch] target: np.int32 array of size [Time, Batch]'
def _get_batch(self):
batch_indices = np.mod(
    np.array([np.arange(head_index, head_index + self._num_steps + 1)
              for head_index in self._head_indices]),
    self._n_flat_elements)
obs = np.array([self._flat_data[indices[:self._num_steps]]
                for indices in batch_indices]).T
target = np.array([self._flat_data[indices[1:self._num_steps + 1]]
                   for indices in batch_indices]).T
if self._random_sampling:
  self._reset_head_indices()
else:
  self._head_indices = np.mod(self._head_indices + self._num_steps,
                              self._n_flat_elements)
return obs, target
'Returns a tuple containing observation and target one-hot tensors.'
def _build(self):
q = tf.FIFOQueue(self._queue_capacity,
                 [self._dtype, self._dtype],
                 shapes=[[self._num_steps, self._batch_size, self._vocab_size]] * 2)
obs, target = tf.py_func(self._get_batch, [], [tf.int32, tf.int32])
obs = self._one_hot(obs)
target = self._one_hot(target)
enqueue_op = q.enqueue([obs, target])
obs, target = q.dequeue()
tf.train.add_queue_runner(tf.train.QueueRunner(q, [enqueue_op]))
return SequenceDataOpsNoMask(obs, target)
'Returns cost. Args: logits: model output. target: target. Returns: Cross-entropy loss for a sequence of logits. The loss will be averaged across time steps if time_average_cost was enabled at construction time.'
def cost(self, logits, target):
logits = tf.reshape(logits, [self._num_steps * self._batch_size, -1])
target = tf.reshape(target, [self._num_steps * self._batch_size, -1])
xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=target)
loss = tf.reduce_sum(xent)
return loss / self._batch_size
'Returns a human-readable version of a one-hot encoding of words. Args: data: A tuple with (obs, target). `obs` is a numpy array with one-hot encoding of words. label_batch_entries: bool. Whether to add numerical label before each batch element in the output string. indices: List of int or None. Used to select a subset of minibatch indices to print. None will print the whole minibatch. sep: A char separator which separates the output for each batch. Defaults to the newline character. Returns: String with the words from `data[0]`.'
def to_human_readable(self, data, label_batch_entries=True, indices=None, sep='\n'):
obs = data[0]
batch_size = obs.shape[1]
result = []
indices = xrange(batch_size) if not indices else indices
for b in indices:
  index_seq = np.argmax(obs[:, b], axis=1)
  prefix = 'b_{}: '.format(b) if label_batch_entries else ''
  result.append(prefix + self._data_source.decode(index_seq))
return sep.join(result)
'Constructs a `TextModel`. Args: num_embedding: Size of embedding representation, used directly after the one-hot encoded input. num_hidden: Number of hidden units in each LSTM layer. lstm_depth: Number of LSTM layers. output_size: Size of the output layer on top of the DeepRNN. use_dynamic_rnn: Whether to use dynamic RNN unrolling. If `False`, it uses static unrolling. Default is `True`. use_skip_connections: Whether to use skip connections in the `snt.DeepRNN`. Default is `True`. name: Name of the module.'
def __init__(self, num_embedding, num_hidden, lstm_depth, output_size, use_dynamic_rnn=True, use_skip_connections=True, name='text_model'):
super(TextModel, self).__init__(name=name)
self._num_embedding = num_embedding
self._num_hidden = num_hidden
self._lstm_depth = lstm_depth
self._output_size = output_size
self._use_dynamic_rnn = use_dynamic_rnn
self._use_skip_connections = use_skip_connections
with self._enter_variable_scope():
  self._embed_module = snt.Linear(self._num_embedding, name='linear_embed')
  self._output_module = snt.Linear(self._output_size, name='linear_output')
  self._lstms = [snt.LSTM(self._num_hidden, name='lstm_{}'.format(i))
                 for i in range(self._lstm_depth)]
  self._core = snt.DeepRNN(self._lstms,
                           skip_connections=self._use_skip_connections,
                           name='deep_lstm')
'Builds the deep LSTM model sub-graph. Args: one_hot_input_sequence: A Tensor with the input sequence encoded as a one-hot representation. Its dimensions should be `[truncation_length, batch_size, output_size]`. Returns: Tuple of the Tensor of output logits for the batch, with dimensions `[truncation_length, batch_size, output_size]`, and the final state of the unrolled core,.'
def _build(self, one_hot_input_sequence):
input_shape = one_hot_input_sequence.get_shape() batch_size = input_shape[1] batch_embed_module = snt.BatchApply(self._embed_module) input_sequence = batch_embed_module(one_hot_input_sequence) input_sequence = tf.nn.relu(input_sequence) initial_state = self._core.initial_state(batch_size) if self._use_dynamic_rnn: (output_sequence, final_state) = tf.nn.dynamic_rnn(cell=self._core, inputs=input_sequence, time_major=True, initial_state=initial_state) else: rnn_input_sequence = tf.unstack(input_sequence) (output, final_state) = tf.contrib.rnn.static_rnn(cell=self._core, inputs=rnn_input_sequence, initial_state=initial_state) output_sequence = tf.stack(output) batch_output_module = snt.BatchApply(self._output_module) output_sequence_logits = batch_output_module(output_sequence) return (output_sequence_logits, final_state)
'Builds sub-graph to generate a string, sampled from the model. Args: initial_logits: Starting logits to sample from. initial_state: Starting state for the RNN core. sequence_length: Number of characters to sample. Returns: A Tensor of characters, with dimensions `[sequence_length, batch_size, output_size]`.'
@snt.experimental.reuse_vars def generate_string(self, initial_logits, initial_state, sequence_length):
current_logits = initial_logits current_state = initial_state generated_letters = [] for _ in range(sequence_length): char_index = tf.squeeze(tf.multinomial(current_logits, 1)) char_one_hot = tf.one_hot(char_index, self._output_size, 1.0, 0.0) generated_letters.append(char_one_hot) (gen_out_seq, current_state) = self._core(tf.nn.relu(self._embed_module(char_one_hot)), current_state) current_logits = self._output_module(gen_out_seq) generated_string = tf.stack(generated_letters) return generated_string
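A hypothetical sketch of how `generate_string` would be wired up after a forward pass; the variable names below are assumptions, not part of the module:

# Assumed: `model` is a TextModel and `one_hot_input_sequence` has shape
# [truncation_length, batch_size, vocab_size], as in _build above.
output_logits, final_state = model(one_hot_input_sequence)

# Seed sampling with the logits of the last input position and the final
# recurrent state, then sample 64 more characters.
generated = model.generate_string(
    initial_logits=output_logits[-1],
    initial_state=final_state,
    sequence_length=64)  # shape: [64, batch_size, output_size]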
'Tests reproducibility of Torch results.'
def testResults(self):
problem = problems.simple() optimizer = meta.MetaOptimizer(net=dict(net='CoordinateWiseDeepLSTM', net_options={'layers': (), 'initializer': 'zeros'})) minimize_ops = optimizer.meta_minimize(problem, 5) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) (cost, final_x) = train(sess, minimize_ops, 1, 2) torch_cost = 0.7325327 torch_final_x = 0.8559 self.assertAlmostEqual(cost, torch_cost, places=4) self.assertAlmostEqual(final_x[0], torch_final_x, places=4)
'Tests different variable->net mappings in multi-optimizer problem.'
@parameterized.expand([(None, {'net': {'net': 'CoordinateWiseDeepLSTM', 'net_options': {'layers': (1, 1)}}}), ([('net', ['x_0', 'x_1'])], {'net': {'net': 'CoordinateWiseDeepLSTM', 'net_options': {'layers': (1,)}}}), ([('net1', ['x_0']), ('net2', ['x_1'])], {'net1': {'net': 'CoordinateWiseDeepLSTM', 'net_options': {'layers': (1,)}}, 'net2': {'net': 'Adam'}}), ([('net1', ['x_0']), ('net2', ['x_0'])], {'net1': {'net': 'CoordinateWiseDeepLSTM', 'net_options': {'layers': (1,)}}, 'net2': {'net': 'CoordinateWiseDeepLSTM', 'net_options': {'layers': (1,)}}})]) def testMultiOptimizer(self, net_assignments, net_config):
problem = problems.simple_multi_optimizer(num_dims=2) optimizer = meta.MetaOptimizer(**net_config) minimize_ops = optimizer.meta_minimize(problem, 3, net_assignments=net_assignments) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) train(sess, minimize_ops, 1, 2)
'Tests second derivatives for simple problem.'
def testSecondDerivatives(self):
problem = problems.simple() optimizer = meta.MetaOptimizer(net=dict(net='CoordinateWiseDeepLSTM', net_options={'layers': ()})) minimize_ops = optimizer.meta_minimize(problem, 3, second_derivatives=True) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) train(sess, minimize_ops, 1, 2)
'Tests L2L applied to problem with convolutions.'
def testConvolutional(self):
kernel_shape = 4 def convolutional_problem(): conv = snt.Conv2D(output_channels=1, kernel_shape=kernel_shape, stride=1, name='conv') output = conv(tf.random_normal((100, 100, 3, 10))) return tf.reduce_sum(output) net_config = {'conv': {'net': 'KernelDeepLSTM', 'net_options': {'kernel_shape': ([kernel_shape] * 2), 'layers': (5,)}}} optimizer = meta.MetaOptimizer(**net_config) minimize_ops = optimizer.meta_minimize(convolutional_problem, 3, net_assignments=[('conv', ['conv/w'])]) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) train(sess, minimize_ops, 1, 2)
'Tests L2L applied to problem with while loop.'
def testWhileLoopProblem(self):
def while_loop_problem(): x = tf.get_variable('x', shape=[], initializer=tf.ones_initializer()) (_, x_squared) = tf.while_loop(cond=(lambda t, _: (t < 1)), body=(lambda t, x: ((t + 1), (x * x))), loop_vars=(0, x), name='loop') return x_squared optimizer = meta.MetaOptimizer(net=dict(net='CoordinateWiseDeepLSTM', net_options={'layers': ()})) minimize_ops = optimizer.meta_minimize(while_loop_problem, 3) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) train(sess, minimize_ops, 1, 2)
'Tests saving and loading a meta-optimizer.'
def testSaveAndLoad(self):
layers = (2, 3) net_options = {'layers': layers, 'initializer': 'zeros'} num_unrolls = 2 num_epochs = 1 problem = problems.simple() with tf.Graph().as_default() as g1: optimizer = meta.MetaOptimizer(net=dict(net='CoordinateWiseDeepLSTM', net_options=net_options)) minimize_ops = optimizer.meta_minimize(problem, 3) with self.test_session(graph=g1) as sess: sess.run(tf.global_variables_initializer()) train(sess, minimize_ops, 1, 2) tmp_dir = tempfile.mkdtemp() save_result = optimizer.save(sess, path=tmp_dir) net_path = next(iter(save_result)) (cost, x) = train(sess, minimize_ops, num_unrolls, num_epochs) with tf.Graph().as_default() as g2: optimizer = meta.MetaOptimizer(net=dict(net='CoordinateWiseDeepLSTM', net_options=net_options, net_path=net_path)) minimize_ops = optimizer.meta_minimize(problem, 3) with self.test_session(graph=g2) as sess: sess.run(tf.global_variables_initializer()) (cost_loaded, x_loaded) = train(sess, minimize_ops, num_unrolls, num_epochs) self.assertAlmostEqual(cost, cost_loaded, places=3) self.assertAlmostEqual(x[0], x_loaded[0], places=3) os.remove(net_path) os.rmdir(tmp_dir)
'Tests the network contains trainable variables.'
def testTrainable(self):
shape = [10, 5] gradients = tf.random_normal(shape) net = networks.CoordinateWiseDeepLSTM(layers=(1,)) state = net.initial_state_for_inputs(gradients) net(gradients, state) variables = snt.get_variables_in_module(net) self.assertEqual(len(variables), 4)
'Tests zero updates when last layer is initialized to zero.'
@parameterized.expand([['zeros'], [{'w': 'zeros', 'b': 'zeros', 'bad': 'bad'}], [{'w': tf.zeros_initializer(), 'b': np.array([0])}], [{'linear': {'w': tf.zeros_initializer(), 'b': 'zeros'}}]]) def testResults(self, initializer):
shape = [10] gradients = tf.random_normal(shape) net = networks.CoordinateWiseDeepLSTM(layers=(1, 1), initializer=initializer) state = net.initial_state_for_inputs(gradients) (update, _) = net(gradients, state) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) update_np = sess.run(update) self.assertAllEqual(update_np, np.zeros(shape))
'Tests the network contains trainable variables.'
def testTrainable(self):
kernel_shape = [5, 5] shape = (kernel_shape + [2, 2]) gradients = tf.random_normal(shape) net = networks.KernelDeepLSTM(layers=(1,), kernel_shape=kernel_shape) state = net.initial_state_for_inputs(gradients) net(gradients, state) variables = snt.get_variables_in_module(net) self.assertEqual(len(variables), 4)
'Tests zero updates when last layer is initialized to zero.'
@parameterized.expand([['zeros'], [{'w': 'zeros', 'b': 'zeros', 'bad': 'bad'}], [{'w': tf.zeros_initializer(), 'b': np.array([0])}], [{'linear': {'w': tf.zeros_initializer(), 'b': 'zeros'}}]]) def testResults(self, initializer):
kernel_shape = [5, 5] shape = (kernel_shape + [2, 2]) gradients = tf.random_normal(shape) net = networks.KernelDeepLSTM(layers=(1, 1), kernel_shape=kernel_shape, initializer=initializer) state = net.initial_state_for_inputs(gradients) (update, _) = net(gradients, state) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) update_np = sess.run(update) self.assertAllEqual(update_np, np.zeros(shape))
'Tests the network doesn\'t contain trainable variables.'
def testNonTrainable(self):
shape = [10, 5] gradients = tf.random_normal(shape) net = networks.Sgd() state = net.initial_state_for_inputs(gradients) net(gradients, state) variables = snt.get_variables_in_module(net) self.assertEqual(len(variables), 0)
'Tests that the network update equals the gradients scaled by the negative learning rate.'
def testResults(self):
shape = [10] learning_rate = 0.01 gradients = tf.random_normal(shape) net = networks.Sgd(learning_rate=learning_rate) state = net.initial_state_for_inputs(gradients) (update, _) = net(gradients, state) with self.test_session() as sess: (gradients_np, update_np) = sess.run([gradients, update]) self.assertAllEqual(update_np, ((- learning_rate) * gradients_np))
'Tests the network doesn\'t contain trainable variables.'
def testNonTrainable(self):
shape = [10, 5] gradients = tf.random_normal(shape) net = networks.Adam() state = net.initial_state_for_inputs(gradients) net(gradients, state) variables = snt.get_variables_in_module(net) self.assertEqual(len(variables), 0)
'Tests network produces zero updates with learning rate equal to zero.'
def testZeroLearningRate(self):
shape = [10] gradients = tf.random_normal(shape) net = networks.Adam(learning_rate=0) state = net.initial_state_for_inputs(gradients) (update, _) = net(gradients, state) with self.test_session() as sess: update_np = sess.run(update) self.assertAllEqual(update_np, np.zeros(shape))
'Creates a MetaOptimizer. Args: **kwargs: A set of keyword arguments mapping network identifiers (the keys) to parameters that will be passed to networks.Factory (see docs for more info). These can be used to assign different optimizee parameters to different optimizers (see net_assignments in the meta_loss method).'
def __init__(self, **kwargs):
self._nets = None if (not kwargs): self._config = {'coordinatewise': {'net': 'CoordinateWiseDeepLSTM', 'net_options': {'layers': (20, 20), 'preprocess_name': 'LogAndSign', 'preprocess_options': {'k': 5}, 'scale': 0.01}}} else: self._config = kwargs
'Save meta-optimizer.'
def save(self, sess, path=None):
result = {} for (k, net) in self._nets.items(): if (path is None): filename = None key = k else: filename = os.path.join(path, '{}.l2l'.format(k)) key = filename net_vars = networks.save(net, sess, filename=filename) result[key] = net_vars return result
'Returns an operator computing the meta-loss. Args: make_loss: Callable which returns the optimizee loss; note that this should create its ops in the default graph. len_unroll: Number of steps to unroll. net_assignments: variable to optimizer mapping. If not None, it should be a list of (k, names) tuples, where k is a valid key in the kwargs passed at construction time and names is a list of variable names. second_derivatives: Use second derivatives (default is False). Returns: namedtuple containing (loss, update, reset, fx, x)'
def meta_loss(self, make_loss, len_unroll, net_assignments=None, second_derivatives=False):
(x, constants) = _get_variables(make_loss) print('Optimizee variables') print([op.name for op in x]) print('Problem variables') print([op.name for op in constants]) (nets, net_keys, subsets) = _make_nets(x, self._config, net_assignments) self._nets = nets state = [] with tf.name_scope('states'): for (i, (subset, key)) in enumerate(zip(subsets, net_keys)): net = nets[key] with tf.name_scope('state_{}'.format(i)): state.append(_nested_variable([net.initial_state_for_inputs(x[j], dtype=tf.float32) for j in subset], name='state', trainable=False)) def update(net, fx, x, state): 'Parameter and RNN state update.' with tf.name_scope('gradients'): gradients = tf.gradients(fx, x) if (not second_derivatives): gradients = [tf.stop_gradient(g) for g in gradients] with tf.name_scope('deltas'): (deltas, state_next) = zip(*[net(g, s) for (g, s) in zip(gradients, state)]) state_next = list(state_next) return (deltas, state_next) def time_step(t, fx_array, x, state): 'While loop body.' x_next = list(x) state_next = [] with tf.name_scope('fx'): fx = _make_with_custom_variables(make_loss, x) fx_array = fx_array.write(t, fx) with tf.name_scope('dx'): for (subset, key, s_i) in zip(subsets, net_keys, state): x_i = [x[j] for j in subset] (deltas, s_i_next) = update(nets[key], fx, x_i, s_i) for (idx, j) in enumerate(subset): x_next[j] += deltas[idx] state_next.append(s_i_next) with tf.name_scope('t_next'): t_next = (t + 1) return (t_next, fx_array, x_next, state_next) fx_array = tf.TensorArray(tf.float32, size=(len_unroll + 1), clear_after_read=False) (_, fx_array, x_final, s_final) = tf.while_loop(cond=(lambda t, *_: (t < len_unroll)), body=time_step, loop_vars=(0, fx_array, x, state), parallel_iterations=1, swap_memory=True, name='unroll') with tf.name_scope('fx'): fx_final = _make_with_custom_variables(make_loss, x_final) fx_array = fx_array.write(len_unroll, fx_final) loss = tf.reduce_sum(fx_array.stack(), name='loss') with tf.name_scope('reset'): variables = ((nest.flatten(state) + x) + constants) reset = [tf.variables_initializer(variables), fx_array.close()] with tf.name_scope('update'): update = (nest.flatten(_nested_assign(x, x_final)) + nest.flatten(_nested_assign(state, s_final))) for (k, net) in nets.items(): print("Optimizer '{}' variables".format(k)) print([op.name for op in snt.get_variables_in_module(net)]) return MetaLoss(loss, update, reset, fx_final, x_final)
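The `net_assignments` argument maps the optimizer keys given at construction time to lists of optimizee variable names. A hedged sketch mirroring the configurations exercised in `testMultiOptimizer` above (the variable names `x_0` and `x_1` belong to the toy multi-optimizer problem):

# Two optimizers, each assigned to one optimizee variable.
optimizer = meta.MetaOptimizer(
    net1={'net': 'CoordinateWiseDeepLSTM', 'net_options': {'layers': (20, 20)}},
    net2={'net': 'Adam'})

loss_info = optimizer.meta_loss(
    make_loss=problems.simple_multi_optimizer(num_dims=2),
    len_unroll=20,
    net_assignments=[('net1', ['x_0']), ('net2', ['x_1'])])
# loss_info is the MetaLoss namedtuple: (loss, update, reset, fx, x)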
'Returns an operator minimizing the meta-loss. Args: make_loss: Callable which returns the optimizee loss; note that this should create its ops in the default graph. len_unroll: Number of steps to unroll. learning_rate: Learning rate for the Adam optimizer. **kwargs: keyword arguments forwarded to meta_loss. Returns: namedtuple containing (step, update, reset, fx, x)'
def meta_minimize(self, make_loss, len_unroll, learning_rate=0.01, **kwargs):
info = self.meta_loss(make_loss, len_unroll, **kwargs) optimizer = tf.train.AdamOptimizer(learning_rate) step = optimizer.minimize(info.loss) return MetaStep(step, *info[1:])
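A minimal training-loop sketch for the MetaStep ops returned above; the epoch/unroll structure is an assumption modelled on the tests in this file, not the repo's train helper verbatim:

step, update, reset, fx, x = optimizer.meta_minimize(make_loss, len_unroll=20)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(num_epochs):      # num_epochs, num_unrolls are assumed
        sess.run(reset)              # re-initialize optimizee and RNN state
        for _ in range(num_unrolls):
            # One fetch runs the truncated unroll: `step` updates the
            # meta-optimizer, `update` writes back the final optimizee state.
            sess.run([step, update])
        cost = sess.run(fx)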
'Initial state given inputs.'
@abc.abstractmethod def initial_state_for_inputs(self, inputs, **kwargs):
pass
'Creates an instance of `StandardDeepLSTM`. Args: output_size: Output sizes of the final linear layer. layers: Output sizes of LSTM layers. preprocess_name: Gradient preprocessing class name (in `l2l.preprocess` or tf modules). Default is `tf.identity`. preprocess_options: Gradient preprocessing options. scale: Gradient scaling (default is 1.0). initializer: Variable initializer for linear layer. See `snt.Linear` and `snt.LSTM` docs for more info. This parameter can be a string (e.g. "zeros" will be converted to tf.zeros_initializer). name: Module name.'
def __init__(self, output_size, layers, preprocess_name='identity', preprocess_options=None, scale=1.0, initializer=None, name='deep_lstm'):
super(StandardDeepLSTM, self).__init__(name=name) self._output_size = output_size self._scale = scale if hasattr(preprocess, preprocess_name): preprocess_class = getattr(preprocess, preprocess_name) self._preprocess = preprocess_class(**preprocess_options) else: self._preprocess = getattr(tf, preprocess_name) with tf.variable_scope(self._template.variable_scope): self._cores = [] for (i, size) in enumerate(layers, start=1): name = 'lstm_{}'.format(i) init = _get_layer_initializers(initializer, name, ('w_gates', 'b_gates')) self._cores.append(snt.LSTM(size, name=name, initializers=init)) self._rnn = snt.DeepRNN(self._cores, skip_connections=False, name='deep_rnn') init = _get_layer_initializers(initializer, 'linear', ('w', 'b')) self._linear = snt.Linear(output_size, name='linear', initializers=init)
'Connects the `StandardDeepLSTM` module into the graph. Args: inputs: 2D `Tensor` ([batch_size, input_size]). prev_state: `DeepRNN` state. Returns: `Tensor` shaped as `inputs`.'
def _build(self, inputs, prev_state):
inputs = self._preprocess(tf.expand_dims(inputs, (-1))) inputs = tf.reshape(inputs, [inputs.get_shape().as_list()[0], (-1)]) (output, next_state) = self._rnn(inputs, prev_state) return ((self._linear(output) * self._scale), next_state)
'Creates an instance of `CoordinateWiseDeepLSTM`. Args: name: Module name. **kwargs: Additional `DeepLSTM` args.'
def __init__(self, name='cw_deep_lstm', **kwargs):
super(CoordinateWiseDeepLSTM, self).__init__(1, name=name, **kwargs)
'Connects the CoordinateWiseDeepLSTM module into the graph. Args: inputs: Arbitrarily shaped `Tensor`. prev_state: `DeepRNN` state. Returns: `Tensor` shaped as `inputs`.'
def _build(self, inputs, prev_state):
input_shape = inputs.get_shape().as_list() reshaped_inputs = self._reshape_inputs(inputs) build_fn = super(CoordinateWiseDeepLSTM, self)._build (output, next_state) = build_fn(reshaped_inputs, prev_state) return (tf.reshape(output, input_shape), next_state)
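`_reshape_inputs` is referenced above but not included in this excerpt. A plausible sketch, assuming each parameter coordinate is treated as an independent batch element with a single scalar feature:

import tensorflow as tf

def _reshape_inputs(self, inputs):
    # Flatten an arbitrarily shaped gradient tensor into [num_coordinates, 1]
    # so the coordinate-wise LSTM processes one scalar per batch element.
    return tf.reshape(inputs, [-1, 1])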
'Creates an instance of `KernelDeepLSTM`. Args: kernel_shape: Kernel shape (2D `tuple`). name: Module name. **kwargs: Additional `DeepLSTM` args.'
def __init__(self, kernel_shape, name='kernel_deep_lstm', **kwargs):
self._kernel_shape = kernel_shape output_size = np.prod(kernel_shape) super(KernelDeepLSTM, self).__init__(output_size, name=name, **kwargs)
'Connects the KernelDeepLSTM module into the graph. Args: inputs: 4D `Tensor` (convolutional filter). prev_state: `DeepRNN` state. Returns: `Tensor` shaped as `inputs`.'
def _build(self, inputs, prev_state):
input_shape = inputs.get_shape().as_list() reshaped_inputs = self._reshape_inputs(inputs) build_fn = super(KernelDeepLSTM, self)._build (output, next_state) = build_fn(reshaped_inputs, prev_state) transposed_output = tf.transpose(output, [1, 0]) return (tf.reshape(transposed_output, input_shape), next_state)
'Initial state given inputs, computed after reshaping them for the kernel.'
def initial_state_for_inputs(self, inputs, **kwargs):
reshaped_inputs = self._reshape_inputs(inputs) return super(KernelDeepLSTM, self).initial_state_for_inputs(reshaped_inputs, **kwargs)
'Creates an instance of the Sgd optimizer network, an identity-style network with no trainable variables that applies plain gradient descent. Args: learning_rate: constant learning rate to use. name: Module name.'
def __init__(self, learning_rate=0.001, name='sgd'):
super(Sgd, self).__init__(name=name) self._learning_rate = learning_rate
'Creates an instance of Adam.'
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, name='adam'):
super(Adam, self).__init__(name=name) self._learning_rate = learning_rate self._beta1 = beta1 self._beta2 = beta2 self._epsilon = epsilon
'Connects the Adam module into the graph.'
def _build(self, g, prev_state):
b1 = self._beta1 b2 = self._beta2 g_shape = g.get_shape().as_list() g = tf.reshape(g, ((-1), 1)) (t, m, v) = prev_state t_next = (t + 1) m_next = _update_adam_estimate(m, g, b1) m_hat = _debias_adam_estimate(m_next, b1, t_next) v_next = _update_adam_estimate(v, tf.square(g), b2) v_hat = _debias_adam_estimate(v_next, b2, t_next) update = (((- self._learning_rate) * m_hat) / (tf.sqrt(v_hat) + self._epsilon)) return (tf.reshape(update, g_shape), (t_next, m_next, v_next))
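The two moment-estimate helpers called above are not shown in this excerpt; the following is a sketch consistent with the standard Adam update equations, offered as an assumption rather than the repo's exact code:

import tensorflow as tf

def _update_adam_estimate(estimate, value, beta):
    # Exponential moving average: m_t = beta * m_{t-1} + (1 - beta) * value.
    return (beta * estimate) + ((1 - beta) * value)

def _debias_adam_estimate(estimate, beta, t):
    # Bias correction: m_hat_t = m_t / (1 - beta ** t).
    return estimate / (1 - tf.pow(beta, t))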
'Tests L2L applied to simple problem.'
def testSimple(self):
problem = problems.simple() optimizer = meta.MetaOptimizer(net=dict(net='CoordinateWiseDeepLSTM', net_options={'layers': (), 'initializer': 'zeros'})) minimize_ops = optimizer.meta_minimize(problem, 20, learning_rate=0.01) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) (cost, _) = train(sess, minimize_ops, 500, 5) self.assertLess(cost, 1e-05)
'Connects the LogAndSign module into the graph. Args: gradients: `Tensor` of gradients with shape `[d_1, ..., d_n]`. Returns: `Tensor` with shape `[d_1, ..., d_n-1, 2 * d_n]`. The first `d_n` elements along the nth dimension correspond to the log output and the remaining `d_n` elements to the sign output.'
def _build(self, gradients):
eps = np.finfo(gradients.dtype.as_numpy_dtype).eps ndims = gradients.get_shape().ndims log = tf.log((tf.abs(gradients) + eps)) clamped_log = Clamp(min_value=(-1.0))((log / self._k)) sign = Clamp(min_value=(-1.0), max_value=1.0)((gradients * np.exp(self._k))) return tf.concat([clamped_log, sign], (ndims - 1))
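A rough NumPy illustration of this preprocessing on a single gradient vector; `k = 5` is an assumed value matching the default configuration used elsewhere in this code:

import numpy as np

k = 5.0
g = np.array([1e-7, -0.5, 3.0])
eps = np.finfo(g.dtype).eps

# Log-magnitude channel, clamped below at -1 (no upper clamp).
log_part = np.maximum(np.log(np.abs(g) + eps) / k, -1.0)
# Sign channel: exp(k) * g, clamped to [-1, 1]; saturates for all but
# very small gradients.
sign_part = np.clip(g * np.exp(k), -1.0, 1.0)

features = np.concatenate([log_part, sign_part], axis=-1)  # shape [2 * d_n]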
'You should not call this constructor directly; rather, use the convenience functions in track.py (for example, track.track_from_filename). The `audio_summary` bucket is always retrieved.'
def __init__(self, identifier, md5, properties):
super(TrackProxy, self).__init__() self.id = identifier self.md5 = md5 self.analysis_url = None self._object_type = 'track' self.__dict__.update(properties)
'Create a catalog object (get a catalog by ID or get or create one given by name and type) Args: id (str): A catalog id or name Kwargs: type (str): \'song\' or \'artist\', specifying the catalog type Returns: A catalog object Example: >>> c = catalog.Catalog(\'my_songs\', type=\'song\') >>> c.id u\'CAVKUPC12BCA792120\' >>> c.name u\'my_songs\''
def __init__(self, id, type=None, **kwargs):
super(Catalog, self).__init__(id, type, **kwargs)
'Update a catalog object Args: items (list): A list of dicts describing update data and action codes (see api docs) Kwargs: Returns: A ticket id Example: >>> c = catalog.Catalog(\'my_songs\', type=\'song\') >>> items [{\'action\': \'update\', \'item\': {\'artist_name\': \'dAn ThE aUtOmAtOr\', \'disc_number\': 1, \'genre\': \'Instrumental\', \'item_id\': \'38937DDF04BC7FC4\', \'play_count\': 5, \'release\': \'Bombay the Hard Way: Guns, Cars & Sitars\', \'song_name\': \'Inspector Jay From Dehli\', \'track_number\': 9, \'url\': \'file://localhost/Users/tylerw/Music/iTunes/iTunes%20Media/Music/Dan%20the%20Automator/Bombay%20the%20Hard%20Way_%20Guns,%20Cars%20&%20Sitars/09%20Inspector%20Jay%20From%20Dehli.m4a\'}}] >>> ticket = c.update(items) >>> ticket u\'7dcad583f2a38e6689d48a792b2e4c96\' >>> c.status(ticket) {u\'ticket_status\': u\'complete\', u\'update_info\': []}'
def update(self, items):
post_data = {} items_json = json.dumps(items, default=dthandler) post_data['data'] = items_json response = self.post_attribute('update', data=post_data) return response['ticket']
'Check the status of a catalog update Args: ticket (str): A string representing a ticket ID Kwargs: Returns: A dictionary representing ticket status Example: >>> ticket u\'7dcad583f2a38e6689d48a792b2e4c96\' >>> c.status(ticket) {u\'ticket_status\': u\'complete\', u\'update_info\': []}'
def status(self, ticket):
return self.get_attribute_simple('status', ticket=ticket)
'Get basic information about the catalog Args: Kwargs: Returns: A dictionary describing the catalog (id, name, type, item counts and pending tickets) Example: >>> c <catalog - test_song_catalog> >>> c.get_profile() {u\'id\': u\'CAGPXKK12BB06F9DE9\', u\'name\': u\'test_song_catalog\', u\'pending_tickets\': [], u\'resolved\': 2, u\'total\': 4, u\'type\': u\'song\'}'
def get_profile(self):
result = self.get_attribute('profile') return result['catalog']
'Returns data from the catalog; also expanded for the requested buckets. This method is provided for backwards-compatibility Args: Kwargs: buckets (list): A list of strings specifying which buckets to retrieve results (int): An integer number of results to return start (int): An integer starting value for the result set Returns: A list of objects in the catalog; list contains additional attributes \'start\' and \'total\' Example: >>> c <catalog - my_songs> >>> c.read_items(results=1) [<song - Harmonice Mundi II>]'
def read_items(self, buckets=None, results=15, start=0, item_ids=None):
warnings.warn('catalog.read_items() is deprecated. Please use catalog.get_item_dicts() instead.') kwargs = {} kwargs['bucket'] = (buckets or []) kwargs['item_id'] = (item_ids or []) response = self.get_attribute('read', results=results, start=start, **kwargs) rval = ResultList([]) if item_ids: rval.start = 0 rval.total = len(response['catalog']['items']) else: rval.start = response['catalog']['start'] rval.total = response['catalog']['total'] for item in response['catalog']['items']: new_item = None if ('song_id' in item): item['id'] = item.pop('song_id') item['title'] = item.pop('song_name') request = item['request'] new_item = song.Song(**util.fix(item)) new_item.request = request elif ('artist_id' in item): item['id'] = item.pop('artist_id') item['name'] = item.pop('artist_name') request = item['request'] new_item = artist.Artist(**util.fix(item)) new_item.request = request else: new_item = item rval.append(new_item) return rval
'Returns data from the catalog; also expanded for the requested buckets Args: Kwargs: buckets (list): A list of strings specifying which buckets to retrieve results (int): An integer number of results to return start (int): An integer starting value for the result set Returns: A list of dicts representing objects in the catalog; list has additional attributes \'start\' and \'total\' Example: >>> c <catalog - my_songs> >>> c.get_item_dicts(results=1) [{"artist_id": "AR78KRI1187B98E6F2", "artist_name": "Art of Noise", "date_added": "2012-04-02T16:50:02", "foreign_id": "CAHLYLR13674D1CF83:song:1000", "request": {"artist_name": "The Art Of Noise", "item_id": "1000", "song_name": "Love"}, "song_id": "SOSBCTO1311AFE7AE0", "song_name": "Love"}]'
def get_item_dicts(self, buckets=None, results=15, start=0, item_ids=None):
kwargs = {} kwargs['bucket'] = (buckets or []) kwargs['item_id'] = (item_ids or []) response = self.get_attribute('read', results=results, start=start, **kwargs) rval = ResultList(response['catalog']['items']) if item_ids: rval.start = 0 rval.total = len(response['catalog']['items']) else: rval.start = response['catalog']['start'] rval.total = response['catalog']['total'] return rval
'Returns feed (news, blogs, reviews, audio, video) for the catalog artists; response depends on requested buckets Args: Kwargs: buckets (list): A list of strings specifying which feed items to retrieve results (int): An integer number of results to return start (int): An integer starting value for the result set Returns: A list of news, blogs, reviews, audio or video document dicts; Example: >>> c <catalog - my_artists> >>> c.get_feed(results=15) {u\'date_found\': u\'2011-02-06T07:50:25\', u\'date_posted\': u\'2011-02-06T07:50:23\', u\'id\': u\'caec686c0dff361e4c53dceb58fb9d2f\', u\'name\': u\'Linkin Park \u2013 \u201cWaiting For The End\u201d + \u201cWhen They Come For Me\u201d 2/5 SNL\', u\'references\': [{u\'artist_id\': u\'ARQUMH41187B9AF699\', u\'artist_name\': u\'Linkin Park\'}], u\'summary\': u\'<span>Linkin</span> <span>Park</span> performed "Waiting For The End" and "When They Come For Me" on Saturday Night Live. Watch the videos below and pick up their album A Thousand Suns on iTunes, Amazon MP3, CD Social Bookmarking ... \', u\'type\': u\'blogs\', u\'url\': u\'http://theaudioperv.com/2011/02/06/linkin-park-waiting-for-the-end-when-they-come-for-me-25-snl/\'}'
def get_feed(self, buckets=None, since=None, results=15, start=0):
kwargs = {} kwargs['bucket'] = (buckets or []) if since: kwargs['since'] = since response = self.get_attribute('feed', results=results, start=start, **kwargs) rval = ResultList(response['feed']) return rval
'Deletes the entire catalog Args: Kwargs: Returns: The deleted catalog\'s id. Example: >>> c <catalog - test_song_catalog> >>> c.delete() {u\'id\': u\'CAXGUPY12BB087A21D\'}'
def delete(self):
return self.post_attribute('delete')
'Retrieve the detailed analysis for the track, if available. Raises Exception if unable to create the detailed analysis.'
def get_analysis(self):
if self.analysis_url: try: try: json_string = urllib2.urlopen(self.analysis_url).read() except urllib2.HTTPError: param_dict = dict(id=self.id) new_track = _profile(param_dict, DEFAULT_ASYNC_TIMEOUT) if (new_track and new_track.analysis_url): self.analysis_url = new_track.analysis_url json_string = urllib2.urlopen(self.analysis_url).read() else: raise Exception('Failed to create track analysis.') analysis = json.loads(json_string) analysis_track = analysis.pop('track', {}) self.__dict__.update(analysis) self.__dict__.update(analysis_track) except Exception: raise Exception('Failed to create track analysis.') else: raise Exception('Failed to create track analysis.')
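A hypothetical usage sketch; the file path below is an assumption and an Echo Nest API key is assumed to be configured:

from pyechonest import track

t = track.track_from_filename('some_song.mp3')  # hypothetical path
t.get_analysis()
# The detailed analysis fields are merged into the object's __dict__,
# so they become attributes of `t` after the call.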
'Artist class Args: id (str): an artist ID Returns: An artist object Example: >>> a = artist.Artist(\'ARH6W4X1187B99274F\', buckets=[\'hotttnesss\']) >>> a.hotttnesss 0.80098515900997658'
def __init__(self, id, **kwargs):
super(Artist, self).__init__(id, **kwargs)
'Get a list of audio documents found on the web related to an artist Args: Kwargs: cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True. results (int): An integer number of results to return start (int): An integer starting value for the result set Returns: A list of audio document dicts; list contains additional attributes \'start\' and \'total\' Example: >>> a = artist.Artist(\'alphabeat\') >>> a.get_audio()[0] {u\'artist\': u\'Alphabeat\', u\'date\': u\'2010-04-28T01:40:45\', u\'id\': u\'70be4373fa57ac2eee8c7f30b0580899\', u\'length\': 210.0, u\'link\': u\'http://iamthecrime.com\', u\'release\': u\'The Beat Is...\', u\'title\': u\'DJ\', u\'url\': u\'http://iamthecrime.com/wp-content/uploads/2010/04/03_DJ_iatc.mp3\'}'
def get_audio(self, results=15, start=0, cache=True):
if (cache and ('audio' in self.cache) and (results == 15) and (start == 0)): return self.cache['audio'] else: response = self.get_attribute('audio', results=results, start=start) if ((results == 15) and (start == 0)): self.cache['audio'] = ResultList(response['audio'], 0, response['total']) return ResultList(response['audio'], start, response['total'])
'Get a list of artist biographies Args: Kwargs: cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True. results (int): An integer number of results to return start (int): An integer starting value for the result set license (str): A string specifying the desired license type Returns: A list of biography document dicts; list contains additional attributes \'start\' and \'total\' Example: >>> a = artist.Artist(\'britney spears\') >>> bio = a.get_biographies(results=1)[0] >>> bio[\'url\'] u\'http://www.mtvmusic.com/spears_britney\''
def get_biographies(self, results=15, start=0, license=None, cache=True):
if (cache and ('biographies' in self.cache) and (results == 15) and (start == 0) and (license == None)): return self.cache['biographies'] else: response = self.get_attribute('biographies', results=results, start=start, license=license) if ((results == 15) and (start == 0) and (license == None)): self.cache['biographies'] = ResultList(response['biographies'], 0, response['total']) return ResultList(response['biographies'], start, response['total'])