'Helper function to add monitoring records to the log.'
def add_records(self, log, record_tuples):
for name, value in record_tuples:
    if not name:
        raise ValueError('monitor variable without name')
    log.current_row[self._record_name(name)] = value
'Write the values of monitored variables to the log.'
def do(self, callback_name, *args):
logger.info('Monitoring on auxiliary data started')
value_dict = self._evaluator.evaluate(self.data_stream)
self.add_records(self.main_loop.log, value_dict.items())
logger.info('Monitoring on auxiliary data finished')
'Initializes the buffer or commits the values to the log. What this method does depends on the callback it is called from and the arguments it is called with. When called within `before_training`, it initializes the aggregation buffer and instructs the training algorithm what additional computations should be carried out at each step by adding corresponding updates to it. In most other cases it writes aggregated values of the monitored variables to the log. An exception is when the argument `just_aggregate` is given: in this case it updates the values of monitored non-Theano quantities, but does not write anything to the log.'
def do(self, callback_name, *args):
(data, args) = self.parse_args(callback_name, args)
if callback_name == 'before_training':
    if not isinstance(self.main_loop.algorithm, UpdatesAlgorithm):
        raise ValueError
    self.main_loop.algorithm.add_updates(
        self._variables.accumulation_updates)
    self.main_loop.algorithm.add_updates(
        self._required_for_non_variables.accumulation_updates)
    self._variables.initialize_aggregators()
    self._required_for_non_variables.initialize_aggregators()
    self._non_variables.initialize_quantities()
else:
    if self.main_loop.status['iterations_done'] > self._last_time_called:
        self._non_variables.aggregate_quantities(
            list(self._required_for_non_variables
                 .get_aggregated_values().values()))
        self._required_for_non_variables.initialize_aggregators()
        self._last_time_called = self.main_loop.status['iterations_done']
    if args == ('just_aggregate',):
        return
    self.add_records(self.main_loop.log,
                     self._variables.get_aggregated_values().items())
    self._variables.initialize_aggregators()
    self.add_records(self.main_loop.log,
                     self._non_variables.get_aggregated_values().items())
    self._non_variables.initialize_quantities()
'Pickle the main loop object to disk. If `*args` contains an argument from the user, it is treated as the saving path to be used instead of the one given at the construction stage.'
def do(self, callback_name, *args):
logger.info('Checkpointing has started')
(_, from_user) = self.parse_args(callback_name, args)
try:
    path = self.path
    if from_user:
        (path,) = from_user
    to_add = None
    if self.save_separately:
        to_add = {attr: getattr(self.main_loop, attr)
                  for attr in self.save_separately}
    if self.parameters is None:
        if hasattr(self.main_loop, 'model'):
            self.parameters = self.main_loop.model.parameters
    object_ = None
    if self.save_main_loop:
        object_ = self.main_loop
    secure_dump(object_, path, dump_function=dump_and_add_to_dump,
                parameters=self.parameters, to_add=to_add,
                use_cpickle=self.use_cpickle)
except Exception:
    path = None
    raise
finally:
    already_saved_to = self.main_loop.log.current_row.get(SAVED_TO, ())
    self.main_loop.log.current_row[SAVED_TO] = already_saved_to + (path,)
    logger.info('Checkpointing has finished')
'Initialize a shared variable with generated parameters. Parameters var : object A Theano shared variable whose value will be set with values drawn from this :class:`NdarrayInitialization` instance. rng : :class:`numpy.random.RandomState` shape : tuple A shape tuple for the requested parameter array shape.'
def initialize(self, var, rng, shape=None):
if not shape:
    shape = var.get_value(borrow=True, return_internal_type=True).shape
var.set_value(self.generate(rng, shape))
'Returns parameters with their hierarchical names. The parameter names are formed from positions of their owner bricks in the bricks hierarchy. The variable names are used for the parameters that do not belong to any brick. Returns parameter_dict : dict A dictionary of (hierarchical name, shared variable) pairs.'
def get_parameter_dict(self):
return self._parameter_dict
'Return the values of model parameters. The same hierarchical names as in :meth:`get_parameter_dict` are used to uniquely identify parameters. Returns parameter_values : OrderedDict Dictionary of (hierarchical name, :class:`~numpy.ndarray`) pairs.'
def get_parameter_values(self):
return OrderedDict(((name, parameter.get_value()) for (name, parameter) in self.get_parameter_dict().items()))
'Set the values of model parameters. The same hierarchical names as in :meth:`get_parameter_dict` are used to uniquely identify parameters. Parameters parameter_values : OrderedDict Dictionary of (hierarchical name, :class:`~numpy.ndarray`) pairs.'
def set_parameter_values(self, parameter_values):
parameters = self.get_parameter_dict()
unknown = set(parameter_values) - set(parameters)
missing = set(parameters) - set(parameter_values)
if len(unknown):
    logger.error('unknown parameter names: {}\n'.format(unknown))
if len(missing):
    logger.error('missing values for parameters: {}\n'.format(missing))
for name, value in parameter_values.items():
    if name in parameters:
        model_shape = parameters[name].container.data.shape
        if model_shape != value.shape:
            raise ValueError('Shape mismatch for parameter: {}. '
                             'Expected {}, got {}.'.format(
                                 name, model_shape, value.shape))
        parameters[name].set_value(value)
'Get the bricks that do not have parents. Returns bricks : list of :class:`~blocks.bricks.base.Brick`'
def get_top_bricks(self):
return self.top_bricks
'Initialize the quantities.'
def initialize_quantities(self):
self._initialized = True
for quantity in self.quantities:
    quantity.initialize()
'Get the aggregated values.'
def get_aggregated_values(self):
if not self._initialized:
    raise Exception('To readout you must first initialize, '
                    'then process batches!')
ret_vals = [q.get_aggregated_value() for q in self.quantities]
return dict(zip(self.quantity_names, ret_vals))
'Aggregate the results for every batch.'
def aggregate_quantities(self, numerical_values):
if not self._initialized:
    raise Exception('To readout you must first initialize, '
                    'then process batches!')
for quantity in self.quantities:
    quantity.aggregate(*[numerical_values[self.requires.index(requirement)]
                         for requirement in quantity.requires])
'Create aggregators and collect updates.'
def _create_aggregators(self):
self.initialization_updates = []
self.accumulation_updates = []
self.readout_variables = OrderedDict()
for v in self.variables:
    logger.debug('variable to evaluate: %s', v.name)
    if not hasattr(v.tag, 'aggregation_scheme'):
        if not self._computation_graph.has_inputs(v):
            scheme = TakeLast if self.use_take_last else _DataIndependent
            logger.debug('Using %s aggregation scheme for %s since it '
                         'does not depend on the data',
                         scheme.__name__, v.name)
            v.tag.aggregation_scheme = scheme(v)
        else:
            logger.debug('Using the default (average over minibatches) '
                         'aggregation scheme for %s', v.name)
            v.tag.aggregation_scheme = Mean(v, 1.0)
    aggregator = v.tag.aggregation_scheme.get_aggregator()
    self.initialization_updates.extend(aggregator.initialization_updates)
    self.accumulation_updates.extend(aggregator.accumulation_updates)
    self.readout_variables[v.name] = aggregator.readout_variable
'Compiles Theano functions. .. todo:: The current compilation method does not account for updates attached to `ComputationGraph` elements. Compiling should be out-sourced to `ComputationGraph` to deal with it.'
def _compile(self):
logger.debug('Compiling initialization and readout functions')
if self.initialization_updates:
    self._initialize_fun = theano.function(
        [], [], updates=self.initialization_updates)
else:
    self._initialize_fun = None
self._readout_fun = theano.function(
    [], [tensor.as_tensor_variable(v)
         for v in self.readout_variables.values()])
logger.debug('Initialization and readout functions compiled')
'Initialize the aggregators.'
def initialize_aggregators(self):
self._initialized = True
if self._initialize_fun is not None:
    self._initialize_fun()
'Readout the aggregated values.'
def get_aggregated_values(self):
if not self._initialized:
    raise Exception('To readout you must first initialize, '
                    'then process batches!')
ret_vals = self._readout_fun()
return OrderedDict(equizip(self.variable_names, ret_vals))
'Compiles Theano functions. .. todo:: The current compilation method does not account for updates attached to `ComputationGraph` elements. Compiling should be out-sourced to `ComputationGraph` to deal with it.'
def _compile(self):
inputs = []
outputs = []
updates = None
if self.theano_buffer.accumulation_updates:
    updates = OrderedDict()
    updates.update(self.theano_buffer.accumulation_updates)
    inputs += self.theano_buffer.inputs
if self.updates:
    if updates is None:
        updates = self.updates
    else:
        updates.update(self.updates)
inputs += self.monitored_quantities_buffer.inputs
outputs = self.monitored_quantities_buffer.requires
if inputs != []:
    self.unique_inputs = list(set(inputs))
    self._aggregate_fun = theano.function(self.unique_inputs, outputs,
                                          updates=updates)
else:
    self._aggregate_fun = None
'Compute the variables over a data stream. Parameters data_stream : instance of :class:`.DataStream` The data stream. Only the first epoch of data is used. Returns A mapping from record names to the values computed on the provided dataset.'
def evaluate(self, data_stream):
self.initialize_aggregators()
if not hasattr(self, '_aggregate_fun'):
    self._compile()
if self._aggregate_fun is not None:
    for batch in data_stream.get_epoch_iterator(as_dict=True):
        self.process_batch(batch)
else:
    logger.debug('Only data independent variables were given, '
                 'will not iterate over the data!')
return self.get_aggregated_values()
'Return a new Aggregator for this variable.'
@abstractmethod def get_aggregator(self):
pass
'Initialize accumulators for this monitored quantity.'
@abstractmethod def initialize(self):
pass
'Aggregate results for every batch. \*args : list of :class:`~numpy.ndarray` The values of the variables required to aggregate the value of the quantity.'
@abstractmethod def aggregate(self, *args):
pass
'Obtain the result of aggregation.'
@abstractmethod def get_aggregated_value(self):
pass
'Return a hexadecimal version of the UUID bytes. This is necessary to store ids in an SQLite database.'
@property def h_uuid(self):
return self.uuid.hex
'Resume a log by setting a new random UUID. Keeps a record of the old log that this is a continuation of. It copies the status of the old log into the new log.'
def resume(self):
old_uuid = self.h_uuid
old_status = dict(self.status)
self.uuid = uuid4()
self.status.update(old_status)
self.status['resumed_from'] = old_uuid
'Retrieve the state for pickling. :class:`sqlite3.Connection` objects are not picklable, so the `conn` attribute is removed and the connection re-opened upon unpickling.'
def __getstate__(self):
state = self.__dict__.copy()
if '_conn' in state:
    del state['_conn']
self.resume()
return state
'Filter the given variables. Parameters variables : list of :class:`~tensor.TensorVariable`'
def __call__(self, variables):
if self.roles: variables = [var for var in variables if has_roles(var, self.roles, self.each_role)] if (self.bricks is not None): filtered_variables = [] for var in variables: var_brick = get_brick(var) if (var_brick is None): continue for brick in self.bricks: if (isclass(brick) and isinstance(var_brick, brick)): filtered_variables.append(var) break elif (isinstance(brick, Brick) and (var_brick is brick)): filtered_variables.append(var) break variables = filtered_variables if self.name: variables = [var for var in variables if (hasattr(var.tag, 'name') and (self.name == var.tag.name))] if self.name_regex: variables = [var for var in variables if (hasattr(var.tag, 'name') and re.match(self.name_regex, var.tag.name))] if self.theano_name: variables = [var for var in variables if ((var.name is not None) and (self.theano_name == var.name))] if self.theano_name_regex: variables = [var for var in variables if ((var.name is not None) and re.match(self.theano_name_regex, var.name))] if self.applications: filtered_variables = [] for var in variables: var_application = get_application_call(var) if (var_application is None): continue if ((var_application.application in self.applications) or (var_application.application.application in self.applications)): filtered_variables.append(var) variables = filtered_variables if self.call_id: variables = [var for var in variables if (get_application_call(var) and (get_application_call(var).metadata['call_id'] == self.call_id))] return variables
'Decorator to make application properties. Parameters name : str The name the property should take. Examples >>> class Foo(Brick): ... @application ... def apply(self, x): ... return x + 1 ... @apply.property(\'inputs\') ... def apply_inputs(self): ... return [\'foo\', \'bar\'] >>> foo = Foo() >>> foo.apply.inputs [\'foo\', \'bar\']'
def property(self, name):
if not isinstance(name, six.string_types):
    raise ValueError

def wrap_property(application_property):
    self.properties[name] = application_property.__name__
    return application_property

return wrap_property
'Decorator to assign a delegate application. An application method can assign a delegate application. Whenever an attribute is not available, it will be requested from the delegate instead. Examples >>> class Foo(Brick): ... @application(outputs=[\'baz\']) ... def apply(self, x): ... return x + 1 ... @apply.property(\'inputs\') ... def apply_inputs(self): ... return [\'foo\', \'bar\'] >>> class Bar(Brick): ... def __init__(self, foo): ... self.foo = foo ... @application(outputs=[\'foo\']) ... def apply(self, x): ... return x + 1 ... @apply.delegate ... def apply_delegate(self): ... return self.foo.apply >>> foo = Foo() >>> bar = Bar(foo) >>> bar.apply.outputs [\'foo\'] >>> bar.apply.inputs [\'foo\', \'bar\']'
def delegate(self, f):
self.delegate_function = f.__name__
return f
'Instantiate :class:`BoundApplication` for each :class:`Brick`.'
def __get__(self, instance, owner):
if instance is None:
    return self
if not hasattr(instance, '_bound_applications'):
    instance._bound_applications = {}
key = '{}.{}'.format(self.brick.__name__, self.application_name)
return instance._bound_applications.setdefault(
    key, BoundApplication(self, instance))
'Allocate shared variables for parameters. Based on the current configuration of this :class:`Brick` create Theano shared variables to store the parameters. After allocation, parameters are accessible through the :attr:`parameters` attribute. This method calls the :meth:`allocate` method of all children first, allowing the :meth:`_allocate` method to override the parameters of the children if needed. Raises ValueError If the configuration of this brick is insufficient to determine the number of parameters or their dimensionality to be initialized. Notes This method sets the :attr:`parameters` attribute to an empty list. This is in order to ensure that calls to this method completely reset the parameters.'
def allocate(self):
if hasattr(self, 'allocation_args'):
    missing_config = [arg for arg in self.allocation_args
                      if getattr(self, arg) is NoneAllocation]
    if missing_config:
        raise ValueError('allocation config not set: {}'
                         .format(', '.join(missing_config)))
if not self.allocation_config_pushed:
    self.push_allocation_config()
for child in self.children:
    child.allocate()
self.parameters = []
self._allocate()
self.allocated = True
'Brick implementation of parameter allocation. Implement this if your brick needs to allocate its parameters. .. warning:: This method should never be called directly. Call :meth:`allocate` instead.'
def _allocate(self):
pass
'Initialize parameters. Initialize parameters, such as weight matrices and biases. Notes If the brick has not allocated its parameters yet, this method will call the :meth:`allocate` method in order to do so.'
def initialize(self):
if hasattr(self, 'initialization_args'):
    missing_config = [arg for arg in self.initialization_args
                      if getattr(self, arg) is NoneInitialization]
    if missing_config:
        raise ValueError('initialization config not set: {}'
                         .format(', '.join(missing_config)))
if not self.allocated:
    self.allocate()
if not self.initialization_config_pushed:
    self.push_initialization_config()
for child in self.children:
    child.initialize()
self._initialize()
self.initialized = True
'Brick implementation of parameter initialization. Implement this if your brick needs to initialize its parameters. .. warning:: This method should never be called directly. Call :meth:`initialize` instead.'
def _initialize(self):
pass
'Push the configuration for allocation to child bricks. Bricks can configure their children, based on their own current configuration. This will be automatically done by a call to :meth:`allocate`, but if you want to override the configuration of child bricks manually, then you can call this function manually.'
def push_allocation_config(self):
self._push_allocation_config()
self.allocation_config_pushed = True
for child in self.children:
    try:
        child.push_allocation_config()
    except Exception:
        self.allocation_config_pushed = False
        raise
'Brick implementation of configuring child before allocation. Implement this if your brick needs to set the configuration of its children before allocation. .. warning:: This method should never be called directly. Call :meth:`push_allocation_config` instead.'
def _push_allocation_config(self):
pass
'Push the configuration for initialization to child bricks. Bricks can configure their children, based on their own current configuration. This will be automatically done by a call to :meth:`initialize`, but if you want to override the configuration of child bricks manually, then you can call this function manually.'
def push_initialization_config(self):
self._push_initialization_config()
self.initialization_config_pushed = True
for child in self.children:
    try:
        child.push_initialization_config()
    except Exception:
        self.initialization_config_pushed = False
        raise
'Brick implementation of configuring child before initialization. Implement this if your brick needs to set the configuration of its children before initialization. .. warning:: This method should never be called directly. Call :meth:`push_initialization_config` instead.'
def _push_initialization_config(self):
pass
'Get dimension of an input/output variable of a brick. Parameters name : str The name of the variable.'
def get_dim(self, name):
raise ValueError('No dimension information for {} available'.format(name))
'Get list of dimensions for a set of input/output variables. Parameters names : list The variable names. Returns dims : list The dimensions of the sources.'
def get_dims(self, names):
return [self.get_dim(name) for name in names]
'Returns unique path to this brick in the application graph.'
def get_unique_path(self):
if self.parents:
    parent = min(self.parents, key=attrgetter('name'))
    return parent.get_unique_path() + [self]
else:
    return [self]
'Return hierarchical name for a parameter. Returns a path of the form ``brick1/brick2/brick3.parameter1``. The delimiter is configurable. Parameters delimiter : str The delimiter used to separate brick names in the path.'
def get_hierarchical_name(self, parameter, delimiter=BRICK_DELIMITER):
return '{}.{}'.format(delimiter.join(([''] + [brick.name for brick in self.get_unique_path()])), parameter.name)
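A small self-contained sketch of the resulting name format, assuming the default delimiter '/' (the usual value of BRICK_DELIMITER) and a hypothetical brick path mlp -> linear_0 holding a parameter named 'W':

delimiter = '/'
brick_names = ['mlp', 'linear_0']        # names along get_unique_path()
parameter_name = 'W'
hierarchical_name = '{}.{}'.format(delimiter.join([''] + brick_names),
                                   parameter_name)
assert hierarchical_name == '/mlp/linear_0.W'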
'Returns Brick\'s Theano RNG, or a default one. The default seed can be set through ``blocks.config``.'
@property def theano_rng(self):
if not hasattr(self, '_theano_rng'):
    self._theano_rng = MRG_RandomStreams(self.theano_seed)
return self._theano_rng
'Distribute the source across the targets. Parameters \*\*kwargs : dict The source and the target variables. Returns output : list The new target variables.'
@application def apply(self, **kwargs):
result = super(Distribute, self).apply(
    kwargs.pop(self.source_name), as_list=True)
for i, name in enumerate(self.target_names):
    result[i] += kwargs.pop(name)
if len(kwargs):
    raise ValueError
return result
'Perform the preprocessing of the attended. Stage 1 of the attention mechanism, see :class:`AbstractAttention` docstring for an explanation of stages. The default implementation simply returns attended. Parameters attended : :class:`~theano.Variable` The attended. Returns preprocessed_attended : :class:`~theano.Variable` The preprocessed attended.'
@application(inputs=['attended'], outputs=['preprocessed_attended']) def preprocess(self, attended):
return attended
'Extract glimpses from the attended given the current states. Stage 2 of the attention mechanism, see :class:`AbstractAttention` for an explanation of stages. If `preprocessed_attended` is not given, this should trigger stage 1. This application method *must* declare its inputs and outputs. The glimpses to be carried over are identified by their presence in both inputs and outputs list. The attended *must* be the first input, the preprocessed attended *must* be the second one. Parameters attended : :class:`~theano.Variable` The attended. preprocessed_attended : :class:`~theano.Variable`, optional The preprocessed attended computed by :meth:`preprocess`. When not given, :meth:`preprocess` should be called. attended_mask : :class:`~theano.Variable`, optional The mask for the attended. This is required in the case of padded structured output, e.g. when a number of sequences are forced to be the same length. The mask identifies the positions of the `attended` that actually contain information. \*\*kwargs : dict Includes the states and the glimpses to be carried over from the previous step in the case when the attention mechanism is applied sequentially.'
@abstractmethod def take_glimpses(self, attended, preprocessed_attended=None, attended_mask=None, **kwargs):
pass
'Return sensible initial values for carried over glimpses. Parameters batch_size : int or :class:`~theano.Variable` The batch size. attended : :class:`~theano.Variable` The attended. Returns initial_glimpses : list of :class:`~theano.Variable` The initial values for the requested glimpses. These might simply consist of zeros or be somehow extracted from the attended.'
@abstractmethod def initial_glimpses(self, batch_size, attended):
pass
'Compute weights from energies in softmax-like fashion. .. todo :: Use :class:`~blocks.bricks.Softmax`. Parameters energies : :class:`~theano.Variable` The energies. Must be of the same shape as the mask. attended_mask : :class:`~theano.Variable` The mask for the attended. The index in the sequence must be the first dimension. Returns weights : :class:`~theano.Variable` Summing to 1 non-negative weights of the same shape as `energies`.'
@application def compute_weights(self, energies, attended_mask):
energies = energies - energies.max(axis=0)
unnormalized_weights = tensor.exp(energies)
if attended_mask:
    unnormalized_weights *= attended_mask
normalization = (unnormalized_weights.sum(axis=0) +
                 tensor.all(1 - attended_mask, axis=0))
return unnormalized_weights / normalization
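A minimal NumPy sketch (not Theano) of the masked softmax above; rows index time, columns index batch items, and the `(1 - mask).all(axis=0)` term keeps fully masked columns from dividing by zero:

import numpy

def masked_softmax(energies, mask):
    energies = energies - energies.max(axis=0)      # numerical stability
    unnormalized = numpy.exp(energies) * mask
    normalization = unnormalized.sum(axis=0) + (1 - mask).all(axis=0)
    return unnormalized / normalization

energies = numpy.zeros((3, 2))
mask = numpy.array([[1.0, 0.0], [1.0, 0.0], [0.0, 0.0]])
print(masked_softmax(energies, mask))   # first column sums to 1, second stays 0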
'Compute weighted averages of the attended sequence vectors. Parameters weights : :class:`~theano.Variable` The weights. The shape must be equal to the attended shape without the last dimension. attended : :class:`~theano.Variable` The attended. The index in the sequence must be the first dimension. Returns weighted_averages : :class:`~theano.Variable` The weighted averages of the attended elements. The shape is equal to the attended shape with the first dimension dropped.'
@application def compute_weighted_averages(self, weights, attended):
return (tensor.shape_padright(weights) * attended).sum(axis=0)
'Compute attention weights and produce glimpses. Parameters attended : :class:`~tensor.TensorVariable` The sequence, time is the 1-st dimension. preprocessed_attended : :class:`~tensor.TensorVariable` The preprocessed sequence. If ``None``, is computed by calling :meth:`preprocess`. attended_mask : :class:`~tensor.TensorVariable` A 0/1 mask specifying available data. 0 means that the corresponding sequence element is fake. \*\*states The states of the network. Returns weighted_averages : :class:`~theano.Variable` Linear combinations of sequence elements with the attention weights. weights : :class:`~theano.Variable` The attention weights. The first dimension is batch, the second is time.'
@application(outputs=['weighted_averages', 'weights']) def take_glimpses(self, attended, preprocessed_attended=None, attended_mask=None, **states):
energies = self.compute_energies(attended, preprocessed_attended, states)
weights = self.compute_weights(energies, attended_mask)
weighted_averages = self.compute_weighted_averages(weights, attended)
return weighted_averages, weights.T
'Preprocess the sequence for computing attention weights. Parameters attended : :class:`~tensor.TensorVariable` The attended sequence, time is the 1-st dimension.'
@application(inputs=['attended'], outputs=['preprocessed_attended']) def preprocess(self, attended):
return self.attended_transformer.apply(attended)
'Compute next states taking glimpses on the way.'
@abstractmethod def apply(self, **kwargs):
pass
'Compute glimpses given the current states.'
@abstractmethod def take_glimpses(self, **kwargs):
pass
'Compute next states given current states and glimpses.'
@abstractmethod def compute_states(self, **kwargs):
pass
'Compute glimpses with the attention mechanism. A thin wrapper over `self.attention.take_glimpses`: takes care of choosing and renaming the necessary arguments. Parameters \*\*kwargs Must contain the attended, previous step states and glimpses. Can optionally contain the attended mask and the preprocessed attended. Returns glimpses : list of :class:`~tensor.TensorVariable` Current step glimpses.'
@application def take_glimpses(self, **kwargs):
states = dict_subset(kwargs, self._state_names, pop=True)
glimpses = dict_subset(kwargs, self._glimpse_names, pop=True)
glimpses_needed = dict_subset(glimpses, self.previous_glimpses_needed)
result = self.attention.take_glimpses(
    kwargs.pop(self.attended_name),
    kwargs.pop(self.preprocessed_attended_name, None),
    kwargs.pop(self.attended_mask_name, None),
    **dict_union(states, glimpses_needed))
return result
'Compute current states when glimpses have already been computed. Combines an application of the `distribute` that alters the sequential inputs of the wrapped transition and an application of the wrapped transition. All unknown keyword arguments go to the wrapped transition. Parameters \*\*kwargs Should contain everything that `self.transition` needs and in addition the current glimpses. Returns current_states : list of :class:`~tensor.TensorVariable` Current states computed by `self.transition`.'
@application def compute_states(self, **kwargs):
normal_inputs = [name for name in self._sequence_names
                 if 'mask' not in name]
sequences = dict_subset(kwargs, normal_inputs, pop=True)
glimpses = dict_subset(kwargs, self._glimpse_names, pop=True)
if self.add_contexts:
    kwargs.pop(self.attended_name)
    kwargs.pop(self.attended_mask_name, None)
sequences.update(self.distribute.apply(
    as_dict=True, **dict_subset(dict_union(sequences, glimpses),
                                self.distribute.apply.inputs)))
current_states = self.transition.apply(
    iterate=False, as_list=True, **dict_union(sequences, kwargs))
return current_states
'Process a sequence attending the attended context every step. In addition to the original sequence this method also requires its preprocessed version, the one computed by the `preprocess` method of the attention mechanism. Unknown keyword arguments are passed to the wrapped transition. Parameters \*\*kwargs Should contain current inputs, previous step states, contexts, the preprocessed attended context, previous step glimpses. Returns outputs : list of :class:`~tensor.TensorVariable` The current step states and glimpses.'
@recurrent def do_apply(self, **kwargs):
attended = kwargs[self.attended_name]
preprocessed_attended = kwargs.pop(self.preprocessed_attended_name)
attended_mask = kwargs.get(self.attended_mask_name)
sequences = dict_subset(kwargs, self._sequence_names, pop=True,
                        must_have=False)
states = dict_subset(kwargs, self._state_names, pop=True)
glimpses = dict_subset(kwargs, self._glimpse_names, pop=True)
current_glimpses = self.take_glimpses(
    as_dict=True,
    **dict_union(states, glimpses,
                 {self.attended_name: attended,
                  self.attended_mask_name: attended_mask,
                  self.preprocessed_attended_name: preprocessed_attended}))
current_states = self.compute_states(
    as_list=True, **dict_union(sequences, states, current_glimpses, kwargs))
return current_states + list(current_glimpses.values())
'Preprocess a sequence attending the attended context at every step. Preprocesses the attended context and runs :meth:`do_apply`. See :meth:`do_apply` documentation for further information.'
@application def apply(self, **kwargs):
preprocessed_attended = self.attention.preprocess(
    kwargs[self.attended_name])
return self.do_apply(**dict_union(
    kwargs, {self.preprocessed_attended_name: preprocessed_attended}))
'Perform lookup. Parameters indices : :class:`~tensor.TensorVariable` The indices of interest. The dtype must be integer. Returns output : :class:`~tensor.TensorVariable` Representations for the indices of the query. Has :math:`k+1` dimensions, where :math:`k` is the number of dimensions of the `indices` parameter. The last dimension stands for the representation element.'
@application(inputs=['indices'], outputs=['output']) def apply(self, indices):
check_theano_variable(indices, None, ('int', 'uint'))
output_shape = ([indices.shape[i] for i in range(indices.ndim)] +
                [self.dim])
return self.W[indices.flatten()].reshape(output_shape)
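A NumPy sketch of the lookup above: W stores one row per vocabulary entry, and the output keeps the index shape with a trailing embedding axis appended:

import numpy

W = numpy.arange(12.0).reshape(4, 3)        # vocabulary size 4, embedding dim 3
indices = numpy.array([[0, 2], [3, 1]])     # any integer index array
output = W[indices.flatten()].reshape(indices.shape + (3,))
assert output.shape == (2, 2, 3)
assert (output[1, 0] == W[3]).all()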
'Perform the convolution. Parameters input_ : :class:`~tensor.TensorVariable` A 4D tensor with the axes representing batch size, number of channels, image height, and image width. Returns output : :class:`~tensor.TensorVariable` A 4D tensor of filtered images (feature maps) with dimensions representing batch size, number of filters, feature map height, and feature map width. The height and width of the feature map depend on the border mode. For \'valid\' it is ``image_size - filter_size + 1`` while for \'full\' it is ``image_size + filter_size - 1``.'
@application(inputs=['input_'], outputs=['output']) def apply(self, input_):
if self.image_size == (None, None):
    input_shape = None
else:
    input_shape = (self.batch_size, self.num_channels)
    input_shape += self.image_size
output = self.conv2d_impl(
    input_, self.W, input_shape=input_shape, subsample=self.step,
    border_mode=self.border_mode,
    filter_shape=((self.num_filters, self.num_channels) +
                  self.filter_size))
if getattr(self, 'use_bias', True):
    if self.tied_biases:
        output += self.b.dimshuffle('x', 0, 'x', 'x')
    else:
        output += self.b.dimshuffle('x', 0, 1, 2)
return output
'Apply the pooling (subsampling) transformation. Parameters input_ : :class:`~tensor.TensorVariable` A tensor with dimension greater than or equal to 2. The last two dimensions will be downsampled. For example, with images this means that the last two dimensions should represent the height and width of your image. Returns output : :class:`~tensor.TensorVariable` A tensor with the same number of dimensions as `input_`, but with the last two dimensions downsampled.'
@application(inputs=['input_'], outputs=['output']) def apply(self, input_):
output = pool_2d(input_, self.pooling_size, stride=self.step,
                 mode=self.mode, pad=self.padding,
                 ignore_border=self.ignore_border)
return output
'Apply the linear transformation. Parameters input_ : :class:`~tensor.TensorVariable` The input on which to apply the transformation Returns output : :class:`~tensor.TensorVariable` The transformed input plus optional bias'
@application(inputs=['input_'], outputs=['output']) def apply(self, input_):
output = tensor.dot(input_, self.W)
if getattr(self, 'use_bias', True):
    output += self.b
return output
'Apply the linear transformation. Parameters input_ : :class:`~tensor.TensorVariable` The input on which to apply the transformation Returns output : :class:`~tensor.TensorVariable` The transformed input plus optional bias'
@application(inputs=['input_'], outputs=['output']) def apply(self, input_):
(b,) = self.parameters
return input_ + b
'Apply the maxout transformation. Parameters input_ : :class:`~tensor.TensorVariable` The input on which to apply the transformation Returns output : :class:`~tensor.TensorVariable` The transformed input'
@application(inputs=['input_'], outputs=['output']) def apply(self, input_):
last_dim = input_.shape[-1]
output_dim = last_dim // self.num_pieces
new_shape = ([input_.shape[i] for i in range(input_.ndim - 1)] +
             [output_dim, self.num_pieces])
output = tensor.max(input_.reshape(new_shape, ndim=input_.ndim + 1),
                    axis=input_.ndim)
return output
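A NumPy sketch of the maxout transformation above, assuming the last axis size is a multiple of num_pieces; each output unit is the maximum over num_pieces consecutive inputs:

import numpy

num_pieces = 2
input_ = numpy.array([[1.0, 5.0, 2.0, 3.0]])
new_shape = input_.shape[:-1] + (input_.shape[-1] // num_pieces, num_pieces)
output = input_.reshape(new_shape).max(axis=-1)
assert (output == numpy.array([[5.0, 3.0]])).all()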
'Apply the linear transformation followed by maxout. Parameters input_ : :class:`~tensor.TensorVariable` The input on which to apply the transformations Returns output : :class:`~tensor.TensorVariable` The transformed input'
@application(inputs=['input_'], outputs=['output']) def apply(self, input_):
pre_activation = self.linear.apply(input_)
output = self.maxout.apply(pre_activation)
return output
'Standard softmax. Parameters input_ : :class:`~theano.Variable` A matrix, each row contains unnormalized log-probabilities of a distribution. Returns output_ : :class:`~theano.Variable` A matrix with probabilities in each row for each distribution from `input_`.'
@application(inputs=['input_'], outputs=['output']) def apply(self, input_):
return tensor.nnet.softmax(input_)
'Normalize log-probabilities. Converts unnormalized log-probabilities (exponents of which do not sum to one) into actual log-probabilities (exponents of which sum to one). Parameters input_ : :class:`~theano.Variable` A matrix, each row contains unnormalized log-probabilities of a distribution. Returns output : :class:`~theano.Variable` A matrix with normalized log-probabilities in each row for each distribution from `input_`.'
@application(inputs=['input_'], outputs=['output']) def log_probabilities(self, input_):
shifted = input_ - input_.max(axis=1, keepdims=True)
return shifted - tensor.log(tensor.exp(shifted).sum(axis=1, keepdims=True))
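A NumPy sketch of the same numerically stable log-softmax; subtracting the per-row maximum prevents exp() from overflowing for large logits:

import numpy

def log_probabilities(input_):
    shifted = input_ - input_.max(axis=1, keepdims=True)
    return shifted - numpy.log(numpy.exp(shifted).sum(axis=1, keepdims=True))

logits = numpy.array([[1000.0, 1000.0, 999.0]])
assert numpy.isfinite(log_probabilities(logits)).all()
assert numpy.allclose(numpy.exp(log_probabilities(logits)).sum(axis=1), 1.0)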
'Computationally stable cross-entropy for pre-softmax values. Parameters y : :class:`~tensor.TensorVariable` In the case of a matrix argument, each row represents a probability distribution. In the vector case, each element represents a distribution by specifying the position of 1 in a 1-hot vector. x : :class:`~tensor.TensorVariable` A matrix, each row contains unnormalized probabilities of a distribution. Returns cost : :class:`~tensor.TensorVariable` A vector of cross-entropies between respective distributions from y and x.'
@application(inputs=['y', 'x'], outputs=['output']) def categorical_cross_entropy(self, application_call, y, x):
x = self.log_probabilities(x)
application_call.add_auxiliary_variable(x.copy(name='log_probabilities'))
if y.ndim == x.ndim - 1:
    indices = tensor.arange(y.shape[0]) * x.shape[1] + y
    cost = -x.flatten()[indices]
elif y.ndim == x.ndim:
    cost = -(x * y).sum(axis=1)
else:
    raise TypeError('rank mismatch between x and y')
return cost
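A NumPy sketch of the two target formats handled above: integer class indices (using the flattened-index trick) and full one-hot rows yield the same per-example cost:

import numpy

x = numpy.log(numpy.array([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]]))   # log-probabilities
y_int = numpy.array([0, 1])
indices = numpy.arange(y_int.shape[0]) * x.shape[1] + y_int
cost_int = -x.flatten()[indices]                  # [-log 0.7, -log 0.8]
y_onehot = numpy.eye(3)[y_int]
cost_onehot = -(x * y_onehot).sum(axis=1)
assert numpy.allclose(cost_int, cost_onehot)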
'Calls :meth:`wrap` for all applications of the base class.'
def __call__(self, mcs, name, bases, namespace):
if not len(bases) == 1:
    raise ValueError('can only wrap one class')
(base,) = bases
for attribute in base.__dict__.values():
    if isinstance(attribute, Application):
        self.wrap(attribute, namespace)
namespace['__doc__'] = _wrapped_class_doc.format(
    base.__name__, self.__class__.__name__)
'Wrap an application of the base brick. This method should be overridden to write into its `namespace` argument all required changes. Parameters mcs : type The metaclass. wrapped : :class:`~blocks.bricks.base.Application` The application to be wrapped. namespace : dict The namespace of the class being created.'
@abstractmethod def wrap(self, wrapped, namespace):
pass
'Return initial states for an application call. Default implementation assumes that the recurrent application method is called `apply`. It fetches the state names from `apply.states` and returns a zero matrix for each of them. :class:`SimpleRecurrent`, :class:`LSTM` and :class:`GatedRecurrent` override this method with trainable initial states initialized with zeros. Parameters batch_size : int The batch size. \*args The positional arguments of the application call. \*\*kwargs The keyword arguments of the application call.'
@application def initial_states(self, batch_size, *args, **kwargs):
result = []
for state in self.apply.states:
    dim = self.get_dim(state)
    if dim == 0:
        result.append(tensor.zeros((batch_size,)))
    else:
        result.append(tensor.zeros((batch_size, dim)))
return result
'Apply the simple transition. Parameters inputs : :class:`~tensor.TensorVariable` The 2D inputs, in the shape (batch, features). states : :class:`~tensor.TensorVariable` The 2D states, in the shape (batch, features). mask : :class:`~tensor.TensorVariable` A 1D binary array in the shape (batch,) which is 1 if there is data available, 0 if not. Assumed to be 1-s only if not given.'
@recurrent(sequences=['inputs', 'mask'], states=['states'], outputs=['states'], contexts=[]) def apply(self, inputs, states, mask=None):
next_states = inputs + tensor.dot(states, self.W)
next_states = self.children[0].apply(next_states)
if mask:
    next_states = (mask[:, None] * next_states +
                   (1 - mask[:, None]) * states)
return next_states
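A NumPy sketch of a single step of the transition above, with tanh standing in for the activation child brick; the mask carries the previous state over for finished sequences:

import numpy

def rnn_step(inputs, states, W, mask=None):
    next_states = numpy.tanh(inputs + states.dot(W))
    if mask is not None:
        next_states = mask[:, None] * next_states + (1 - mask[:, None]) * states
    return next_states

states = numpy.zeros((2, 3))
inputs = numpy.ones((2, 3))
W = numpy.eye(3)
mask = numpy.array([1.0, 0.0])     # the second example keeps its previous state
print(rnn_step(inputs, states, W, mask))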
'Apply the Long Short Term Memory transition. Parameters states : :class:`~tensor.TensorVariable` The 2 dimensional matrix of current states in the shape (batch_size, features). Required for `one_step` usage. cells : :class:`~tensor.TensorVariable` The 2 dimensional matrix of current cells in the shape (batch_size, features). Required for `one_step` usage. inputs : :class:`~tensor.TensorVariable` The 2 dimensional matrix of inputs in the shape (batch_size, features * 4). The `inputs` needs to be four times the dimension of the LSTM brick to ensure that each of the four gates receives a different transformation of the input. See [Grav13]_ equations 7 to 10 for more details. The `inputs` are then split in this order: input gates, forget gates, cells and output gates. mask : :class:`~tensor.TensorVariable` A 1D binary array in the shape (batch,) which is 1 if there is data available, 0 if not. Assumed to be all ones if not given. .. [Grav13] Graves, Alex, *Generating sequences with recurrent* *neural networks*, arXiv preprint arXiv:1308.0850 (2013). Returns states : :class:`~tensor.TensorVariable` Next states of the network. cells : :class:`~tensor.TensorVariable` Next cell activations of the network.'
@recurrent(sequences=['inputs', 'mask'], states=['states', 'cells'], contexts=[], outputs=['states', 'cells']) def apply(self, inputs, states, cells, mask=None):
def slice_last(x, no):
    return x[:, no * self.dim:(no + 1) * self.dim]

activation = tensor.dot(states, self.W_state) + inputs
in_gate = self.gate_activation.apply(
    slice_last(activation, 0) + cells * self.W_cell_to_in)
forget_gate = self.gate_activation.apply(
    slice_last(activation, 1) + cells * self.W_cell_to_forget)
next_cells = (forget_gate * cells +
              in_gate * self.activation.apply(slice_last(activation, 2)))
out_gate = self.gate_activation.apply(
    slice_last(activation, 3) + next_cells * self.W_cell_to_out)
next_states = out_gate * self.activation.apply(next_cells)
if mask:
    next_states = (mask[:, None] * next_states +
                   (1 - mask[:, None]) * states)
    next_cells = (mask[:, None] * next_cells +
                  (1 - mask[:, None]) * cells)
return next_states, next_cells
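A NumPy sketch of one LSTM step as implemented above, including the elementwise peephole terms; `inputs` is assumed to already be the input projection with 4 * dim columns, and sigmoid/tanh stand in for the gate and state activation bricks:

import numpy

def sigmoid(x):
    return 1.0 / (1.0 + numpy.exp(-x))

def lstm_step(inputs, states, cells, W_state, w_in, w_forget, w_out, dim):
    def slice_last(x, no):
        return x[:, no * dim:(no + 1) * dim]
    activation = states.dot(W_state) + inputs
    in_gate = sigmoid(slice_last(activation, 0) + cells * w_in)
    forget_gate = sigmoid(slice_last(activation, 1) + cells * w_forget)
    next_cells = forget_gate * cells + in_gate * numpy.tanh(slice_last(activation, 2))
    out_gate = sigmoid(slice_last(activation, 3) + next_cells * w_out)
    return out_gate * numpy.tanh(next_cells), next_cells

dim = 2
w = numpy.ones(dim)
states, cells = lstm_step(numpy.ones((1, 4 * dim)), numpy.zeros((1, dim)),
                          numpy.zeros((1, dim)), numpy.zeros((dim, 4 * dim)),
                          w, w, w, dim)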
'Apply the gated recurrent transition. Parameters states : :class:`~tensor.TensorVariable` The 2 dimensional matrix of current states in the shape (batch_size, dim). Required for `one_step` usage. inputs : :class:`~tensor.TensorVariable` The 2 dimensional matrix of inputs in the shape (batch_size, dim) gate_inputs : :class:`~tensor.TensorVariable` The 2 dimensional matrix of inputs to the gates in the shape (batch_size, 2 * dim). mask : :class:`~tensor.TensorVariable` A 1D binary array in the shape (batch,) which is 1 if there is data available, 0 if not. Assumed to be 1-s only if not given. Returns output : :class:`~tensor.TensorVariable` Next states of the network.'
@recurrent(sequences=['mask', 'inputs', 'gate_inputs'], states=['states'], outputs=['states'], contexts=[]) def apply(self, inputs, gate_inputs, states, mask=None):
gate_values = self.gate_activation.apply(
    states.dot(self.state_to_gates) + gate_inputs)
update_values = gate_values[:, :self.dim]
reset_values = gate_values[:, self.dim:]
states_reset = states * reset_values
next_states = self.activation.apply(
    states_reset.dot(self.state_to_state) + inputs)
next_states = (next_states * update_values +
               states * (1 - update_values))
if mask:
    next_states = (mask[:, None] * next_states +
                   (1 - mask[:, None]) * states)
return next_states
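A NumPy sketch of one gated recurrent step as above; `gate_inputs` already holds the input contribution for the update and reset gates (2 * dim columns), and sigmoid/tanh stand in for the gate and state activations:

import numpy

def sigmoid(x):
    return 1.0 / (1.0 + numpy.exp(-x))

def gru_step(inputs, gate_inputs, states, state_to_state, state_to_gates, dim):
    gate_values = sigmoid(states.dot(state_to_gates) + gate_inputs)
    update_values = gate_values[:, :dim]
    reset_values = gate_values[:, dim:]
    next_states = numpy.tanh((states * reset_values).dot(state_to_state) + inputs)
    return next_states * update_values + states * (1 - update_values)

dim = 2
print(gru_step(numpy.ones((1, dim)), numpy.zeros((1, 2 * dim)),
               numpy.zeros((1, dim)), numpy.eye(dim),
               numpy.zeros((dim, 2 * dim)), dim))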
'Applies forward and backward networks and concatenates outputs.'
@application def apply(self, *args, **kwargs):
forward = self.children[0].apply(as_list=True, *args, **kwargs)
backward = [x[::-1] for x in
            self.children[1].apply(reverse=True, as_list=True,
                                   *args, **kwargs)]
return [tensor.concatenate([f, b], axis=2)
        for f, b in equizip(forward, backward)]
'Apply the stack of transitions. This is the undecorated implementation of the apply method. A method with an @apply decoration should call this method with `iterate=True` to indicate that the iteration over all steps should be done internally by this method. A method with a `@recurrent` decoration should have `iterate=False` (or unset) to indicate that the iteration over all steps is done externally.'
def do_apply(self, *args, **kwargs):
nargs = len(args) args_names = (self.apply.sequences + self.apply.contexts) assert (nargs <= len(args_names)) kwargs.update(zip(args_names[:nargs], args)) if kwargs.get('reverse', False): raise NotImplementedError results = [] last_states = None for (level, transition) in enumerate(self.transitions): normal_inputs = self.normal_inputs(level) layer_kwargs = dict() if ((level == 0) or self.skip_connections): for name in normal_inputs: layer_kwargs[name] = kwargs.get(self.suffix(name, level)) if ('mask' in transition.apply.sequences): layer_kwargs['mask'] = kwargs.get('mask') for name in transition.apply.states: layer_kwargs[name] = kwargs.get(self.suffix(name, level)) for name in transition.apply.contexts: layer_kwargs[name] = kwargs.get(name) if (level > 0): inputs = self.forks[(level - 1)].apply(last_states, as_list=True) for (name, input_) in zip(normal_inputs, inputs): if layer_kwargs.get(name): layer_kwargs[name] += input_ else: layer_kwargs[name] = input_ for k in (set(kwargs.keys()) - self.transition_args): layer_kwargs[k] = kwargs[k] result = transition.apply(as_list=True, **layer_kwargs) results.extend(result) state_index = transition.apply.outputs.index(self.states_name) last_states = result[state_index] if kwargs.get('return_initial_states', False): last_states = last_states[1:] return tuple(results)
'Apply the stack of transitions. Parameters low_memory : bool Use the slow, but also memory efficient, implementation of this code. \*args : :class:`~tensor.TensorVariable`, optional Positional arguments in the order in which they appear in `self.apply.sequences` followed by `self.apply.contexts`. \*\*kwargs : :class:`~tensor.TensorVariable` Named arguments defined in `self.apply.sequences`, `self.apply.states` or `self.apply.contexts` Returns outputs : (list of) :class:`~tensor.TensorVariable` The outputs of all transitions as defined in `self.apply.outputs` See Also See docstring of this class for arguments appearing in the lists `self.apply.sequences`, `self.apply.states`, `self.apply.contexts`. See :func:`~blocks.brick.recurrent.recurrent` for all other parameters such as `iterate` and `return_initial_states`; note that `reverse` is currently not implemented.'
@application def apply(self, *args, **kwargs):
if kwargs.pop('low_memory', False):
    return self.low_memory_apply(*args, **kwargs)
return self.do_apply(*args, **kwargs)
'Returns the average cost over the minibatch. The cost is computed by averaging the sum of per token costs for each sequence over the minibatch. .. warning:: Note that, the computed cost can be problematic when batches consist of vastly different sequence lengths. Parameters outputs : :class:`~tensor.TensorVariable` The 3(2) dimensional tensor containing output sequences. The axis 0 must stand for time, the axis 1 for the position in the batch. mask : :class:`~tensor.TensorVariable` The binary matrix identifying fake outputs. Returns cost : :class:`~tensor.Variable` Theano variable for cost, computed by summing over timesteps and then averaging over the minibatch. Notes The contexts are expected as keyword arguments. Adds average cost per sequence element `AUXILIARY` variable to the computational graph with name ``per_sequence_element``.'
@application def cost(self, application_call, outputs, mask=None, **kwargs):
costs = self.cost_matrix(outputs, mask=mask, **kwargs)
cost = tensor.mean(costs.sum(axis=0))
add_role(cost, COST)
application_call.add_auxiliary_variable(
    (costs.sum() / mask.sum()) if mask is not None else costs.mean(),
    name='per_sequence_element')
return cost
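A NumPy sketch of the two statistics computed above, assuming `costs` has shape (time, batch) and `mask` is a 0/1 array of the same shape: the training cost averages per-sequence sums over the batch, while the auxiliary value is the mean cost per real token:

import numpy

costs = numpy.array([[0.5, 1.0], [0.25, 0.0]])
mask = numpy.array([[1.0, 1.0], [1.0, 0.0]])      # the second sequence has length 1
costs = costs * mask
cost = costs.sum(axis=0).mean()                    # 0.875: mean of per-sequence sums
per_sequence_element = costs.sum() / mask.sum()    # ~0.583: mean per real token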
'Returns generation costs for output sequences. See Also :meth:`cost` : Scalar cost.'
@application def cost_matrix(self, application_call, outputs, mask=None, **kwargs):
batch_size = outputs.shape[1] states = dict_subset(kwargs, self._state_names, must_have=False) contexts = dict_subset(kwargs, self._context_names, must_have=False) feedback = self.readout.feedback(outputs) inputs = self.fork.apply(feedback, as_dict=True) results = self.transition.apply(mask=mask, return_initial_states=True, as_dict=True, **dict_union(inputs, states, contexts)) states = {name: results[name][:(-1)] for name in self._state_names} glimpses = {name: results[name][1:] for name in self._glimpse_names} feedback = tensor.roll(feedback, 1, 0) feedback = tensor.set_subtensor(feedback[0], self.readout.feedback(self.readout.initial_outputs(batch_size))) readouts = self.readout.readout(feedback=feedback, **dict_union(states, glimpses, contexts)) costs = self.readout.cost(readouts, outputs) if (mask is not None): costs *= mask for (name, variable) in (list(glimpses.items()) + list(states.items())): application_call.add_auxiliary_variable(variable.copy(), name=name) for name in (self._state_names + self._glimpse_names): application_call.add_auxiliary_variable(results[name][(-1)].copy(), name=(name + '_final_value')) return costs
'A sequence generation step. Parameters outputs : :class:`~tensor.TensorVariable` The outputs from the previous step. Notes The contexts, previous states and glimpses are expected as keyword arguments.'
@recurrent def generate(self, outputs, **kwargs):
states = dict_subset(kwargs, self._state_names)
contexts = dict_subset(kwargs, self._context_names, must_have=False)
glimpses = dict_subset(kwargs, self._glimpse_names)
next_glimpses = self.transition.take_glimpses(
    as_dict=True, **dict_union(states, glimpses, contexts))
next_readouts = self.readout.readout(
    feedback=self.readout.feedback(outputs),
    **dict_union(states, next_glimpses, contexts))
next_outputs = self.readout.emit(next_readouts)
next_costs = self.readout.cost(next_readouts, next_outputs)
next_feedback = self.readout.feedback(next_outputs)
next_inputs = (self.fork.apply(next_feedback, as_dict=True)
               if self.fork else {'feedback': next_feedback})
next_states = self.transition.compute_states(
    as_list=True,
    **dict_union(next_inputs, states, next_glimpses, contexts))
return (next_states + [next_outputs] +
        list(next_glimpses.values()) + [next_costs])
'Produce outputs from readouts. Parameters readouts : :class:`~theano.Variable` Readouts produced by the :meth:`readout` method of a `(batch_size, readout_dim)` shape.'
@abstractmethod def emit(self, readouts):
pass
'Compute generation cost of outputs given readouts. Parameters readouts : :class:`~theano.Variable` Readouts produced by the :meth:`readout` method of a `(..., readout dim)` shape. outputs : :class:`~theano.Variable` Outputs whose cost should be computed. Should have as many or one less dimensions compared to `readout`. If readout has `n` dimensions, first `n - 1` dimensions of `outputs` should match with those of `readouts`.'
@abstractmethod def cost(self, readouts, outputs):
pass
'Compute initial outputs for the generator\'s first step. In the notation from the :class:`BaseSequenceGenerator` documentation this method should compute :math:`y_0`.'
@abstractmethod def initial_outputs(self, batch_size):
pass
'Compute the readout vector from states, glimpses, etc. Parameters \*\*kwargs: dict Contains sequence generator states, glimpses, contexts and feedback from the previous outputs.'
@abstractmethod def readout(self, **kwargs):
pass
'Feeds outputs back to be used as inputs of the transition.'
@abstractmethod def feedback(self, outputs):
pass
'Implements the respective method of :class:`Readout`.'
@abstractmethod def emit(self, readouts):
pass
'Implements the respective method of :class:`Readout`.'
@abstractmethod def cost(self, readouts, outputs):
pass
'Implements the respective method of :class:`Readout`.'
@abstractmethod def initial_outputs(self, batch_size):
pass
'Implements the respective method of :class:`Readout`.'
@abstractmethod def feedback(self, outputs):
pass
'Quick access to the (data stream, epoch iterator) pair.'
@property def iteration_state(self):
return (self.data_stream, self.epoch_iterator)
'A shortcut for `self.log.status`.'
@property def status(self):
return self.log.status
'Starts the main loop. The main loop ends when a training extension makes a `training_finish_requested` record in the log.'
def run(self):
logging.basicConfig() self.profile.current = [] if hasattr(self._model, 'check_sanity'): self._model.check_sanity(self.algorithm) with change_recursion_limit(config.recursion_limit): self.original_sigint_handler = signal.signal(signal.SIGINT, self._handle_epoch_interrupt) self.original_sigterm_handler = signal.signal(signal.SIGTERM, self._handle_batch_interrupt) try: logger.info('Entered the main loop') if (not self.status['training_started']): for extension in self.extensions: extension.main_loop = self self._run_extensions('before_training') with Timer('initialization', self.profile): self.algorithm.initialize() self.status['training_started'] = True if (self.log.status['iterations_done'] > 0): self.log.resume() self._run_extensions('on_resumption') self.status['epoch_interrupt_received'] = False self.status['batch_interrupt_received'] = False with Timer('training', self.profile): while self._run_epoch(): pass except TrainingFinish: self.log.current_row['training_finished'] = True except Exception as e: self._restore_signal_handlers() self.log.current_row['got_exception'] = traceback.format_exc() logger.error(('Error occured during training.' + error_message)) try: self._run_extensions('on_error', e) except Exception: logger.error(traceback.format_exc()) logger.error(('Error occured when running extensions.' + error_in_error_handling_message)) reraise_as(e) finally: self._restore_signal_handlers() if self.log.current_row.get('training_finished', False): self._run_extensions('after_training') if config.profile: self.profile.report()
'Find an extension with a given name. Parameters name : str The name of the extension looked for. Notes Will crash if no extension or more than one extension with the given name is found.'
def find_extension(self, name):
return unpack([extension for extension in self.extensions if (extension.name == name)], singleton=True)
'Checks whether the current training should be terminated. Parameters level : {\'epoch\', \'batch\'} The level at which this check was performed. In some cases, we only want to quit after completing the remainder of the epoch.'
def _check_finish_training(self, level):
if (self.log.current_row.get('training_finish_requested', False) or
        self.status.get('batch_interrupt_received', False)):
    raise TrainingFinish
if (level == 'epoch' and
        self.status.get('epoch_interrupt_received', False)):
    raise TrainingFinish
'Print a report of timing information to standard output. Parameters f : object, optional An object with a ``write`` method that accepts string inputs. Can be a file object, ``sys.stdout``, etc. Defaults to ``sys.stderr``.'
def report(self, f=sys.stderr):
total = sum((v for (k, v) in self.total.items() if (len(k) == 1))) def print_report(keys, level=0): subtotal = 0 for key in keys: if (len(key) > (level + 1)): continue subtotal += self.total[key] section = ' '.join(key[(-1)].split('_')) section = (section[0].upper() + section[1:]) print('{:30}{:15.2f}{:15.2%}'.format(((level * ' ') + section), self.total[key], (self.total[key] / total)), file=f) children = [k for k in keys if ((k[level] == key[level]) and (len(k) > (level + 1)))] child_total = print_report(children, (level + 1)) if children: print('{:30}{:15.2f}{:15.2%}'.format((((level + 1) * ' ') + 'Other'), (self.total[key] - child_total), ((self.total[key] - child_total) / total)), file=f) return subtotal print('{:30}{:>15}{:>15}'.format('Section', 'Time', '% of total'), file=f) print(('-' * 60), file=f) if total: print_report(self.order.keys()) else: print('No profile information collected.', file=f)
'The operation to perform when an item is inserted/appended.'
def _setitem(self, key, value):
pass
'The operation to perform when an item is deleted.'
def _delitem(self, key):
pass
'Parameters weight_init_std : Specifies the standard deviation of the weights (e.g. 0.01). If \'relu\' or \'he\' is given, the "He initialization" is used; if \'sigmoid\' or \'xavier\' is given, the "Xavier initialization" is used.'
def __init_weight(self, weight_init_std):
all_size_list = [self.input_size] + self.hidden_size_list + [self.output_size]
for idx in range(1, len(all_size_list)):
    scale = weight_init_std
    if str(weight_init_std).lower() in ('relu', 'he'):
        scale = np.sqrt(2.0 / all_size_list[idx - 1])
    elif str(weight_init_std).lower() in ('sigmoid', 'xavier'):
        scale = np.sqrt(1.0 / all_size_list[idx - 1])
    self.params['W' + str(idx)] = scale * np.random.randn(
        all_size_list[idx - 1], all_size_list[idx])
    self.params['b' + str(idx)] = np.zeros(all_size_list[idx])
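A NumPy sketch of the scale rule above: 'he'/'relu' selects sqrt(2 / fan_in) (the "He initialization"), 'sigmoid'/'xavier' selects sqrt(1 / fan_in) (the "Xavier initialization"), and any numeric value is used directly as the standard deviation:

import numpy as np

def init_scale(weight_init_std, fan_in):
    if str(weight_init_std).lower() in ('relu', 'he'):
        return np.sqrt(2.0 / fan_in)
    if str(weight_init_std).lower() in ('sigmoid', 'xavier'):
        return np.sqrt(1.0 / fan_in)
    return weight_init_std

W1 = init_scale('he', 784) * np.random.randn(784, 100)
W2 = init_scale(0.01, 100) * np.random.randn(100, 10)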
'Parameters x : input data t : teacher labels Returns The value of the loss function.'
def loss(self, x, t):
y = self.predict(x)
weight_decay = 0
for idx in range(1, self.hidden_layer_num + 2):
    W = self.params['W' + str(idx)]
    weight_decay += 0.5 * self.weight_decay_lambda * np.sum(W ** 2)
return self.last_layer.forward(y, t) + weight_decay
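A NumPy sketch of the L2 penalty added above: half of weight_decay_lambda times the sum of squared weights of every layer, added on top of the data loss:

import numpy as np

def l2_penalty(weight_matrices, weight_decay_lambda):
    return sum(0.5 * weight_decay_lambda * np.sum(W ** 2) for W in weight_matrices)

weights = [np.ones((2, 2)), 2 * np.ones((2, 1))]
assert np.isclose(l2_penalty(weights, 0.1), 0.5 * 0.1 * (4.0 + 8.0))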
'Parameters x : input data t : teacher labels Returns grads[\'W1\'], grads[\'W2\'], ... hold the weights of each layer; grads[\'b1\'], grads[\'b2\'], ... hold the biases of each layer.'
def numerical_gradient(self, x, t):
loss_W = lambda W: self.loss(x, t)
grads = {}
for idx in range(1, self.hidden_layer_num + 2):
    grads['W' + str(idx)] = numerical_gradient(loss_W,
                                               self.params['W' + str(idx)])
    grads['b' + str(idx)] = numerical_gradient(loss_W,
                                               self.params['b' + str(idx)])
return grads
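The `numerical_gradient` helper used above is assumed to be a central-difference routine; a minimal NumPy sketch of such a helper, not necessarily identical to the one shipped with this code:

import numpy as np

def numerical_gradient(f, x, h=1e-4):
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        original = x[idx]
        x[idx] = original + h
        fxh1 = f(x)
        x[idx] = original - h
        fxh2 = f(x)
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = original
        it.iternext()
    return grad

# quick check against the analytic gradient of f(x) = sum(x ** 2)
x = np.array([1.0, 2.0])
assert np.allclose(numerical_gradient(lambda v: np.sum(v ** 2), x), 2 * x)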
'Parameters x : input data t : teacher labels Returns grads[\'W1\'], grads[\'W2\'], ... hold the weight gradients of each layer; grads[\'b1\'], grads[\'b2\'], ... hold the bias gradients of each layer.'
def gradient(self, x, t):
self.loss(x, t)
dout = 1
dout = self.last_layer.backward(dout)
layers = list(self.layers.values())
layers.reverse()
for layer in layers:
    dout = layer.backward(dout)
grads = {}
for idx in range(1, self.hidden_layer_num + 2):
    grads['W' + str(idx)] = (self.layers['Affine' + str(idx)].dW +
                             self.weight_decay_lambda *
                             self.layers['Affine' + str(idx)].W)
    grads['b' + str(idx)] = self.layers['Affine' + str(idx)].db
return grads