Code samples (Python, MIT license) from the rlworkgroup/garage repository, paired with their docstrings. Each sample below is followed by a one-line metadata row: | func_name | language | repo | path | url | license |.

def fit(self, paths):
"""Fit regressor based on paths.
Args:
paths (list[dict]): Sample paths.
"""
xs = np.concatenate([p['observations'] for p in paths])
if not isinstance(xs, np.ndarray) or len(xs.shape) > 2:
xs = self._env_spec.observation_space.flatten_n(xs)
ys = np.concatenate([p['returns'] for p in paths])
ys = ys.reshape((-1, 1))
if self._subsample_factor < 1:
num_samples_tot = xs.shape[0]
idx = np.random.randint(
0, num_samples_tot,
int(num_samples_tot * self._subsample_factor))
xs, ys = xs[idx], ys[idx]
if self._normalize_inputs:
# recompute normalizing constants for inputs
self._x_mean.load(np.mean(xs, axis=0, keepdims=True))
self._x_std.load(np.std(xs, axis=0, keepdims=True) + 1e-8)
self._old_network.x_mean.load(np.mean(xs, axis=0, keepdims=True))
self._old_network.x_std.load(
np.std(xs, axis=0, keepdims=True) + 1e-8)
if self._normalize_outputs:
# recompute normalizing constants for outputs
self._y_mean.load(np.mean(ys, axis=0, keepdims=True))
self._y_std.load(np.std(ys, axis=0, keepdims=True) + 1e-8)
self._old_network.y_mean.load(np.mean(ys, axis=0, keepdims=True))
self._old_network.y_std.load(
np.std(ys, axis=0, keepdims=True) + 1e-8)
inputs = [xs, ys]
loss_before = self._optimizer.loss(inputs)
tabular.record('{}/LossBefore'.format(self._name), loss_before)
self._optimizer.optimize(inputs)
loss_after = self._optimizer.loss(inputs)
tabular.record('{}/LossAfter'.format(self._name), loss_after)
if self._use_trust_region:
tabular.record('{}/MeanKL'.format(self._name),
self._optimizer.constraint_val(inputs))
tabular.record('{}/dLoss'.format(self._name), loss_before - loss_after)
self._old_model.parameters = self.parameters
| fit | python | rlworkgroup/garage | src/garage/tf/baselines/gaussian_mlp_baseline.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/baselines/gaussian_mlp_baseline.py | MIT |

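A minimal numpy sketch of the subsampling and normalization that fit() performs above; the array shapes and the subsample factor are illustrative assumptions, not part of garage's API.

import numpy as np

xs = np.random.randn(1000, 4)               # stacked observations from all paths
ys = np.random.randn(1000).reshape(-1, 1)   # stacked returns

subsample_factor = 0.5                      # assumed value, for illustration only
idx = np.random.randint(0, xs.shape[0], int(xs.shape[0] * subsample_factor))
xs, ys = xs[idx], ys[idx]

# normalizing constants; 1e-8 guards against a zero standard deviation
x_mean = np.mean(xs, axis=0, keepdims=True)
x_std = np.std(xs, axis=0, keepdims=True) + 1e-8
y_mean = np.mean(ys, axis=0, keepdims=True)
y_std = np.std(ys, axis=0, keepdims=True) + 1e-8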
def predict(self, paths):
"""Predict value based on paths.
Args:
paths (list[dict]): Sample paths.
Returns:
numpy.ndarray: Predicted value.
"""
xs = paths['observations']
if not isinstance(xs, np.ndarray) or len(xs.shape) > 2:
xs = self._env_spec.observation_space.flatten_n(xs)
return self._f_predict(xs).flatten()
| predict | python | rlworkgroup/garage | src/garage/tf/baselines/gaussian_mlp_baseline.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/baselines/gaussian_mlp_baseline.py | MIT |

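A hedged usage sketch: fit() above takes a list of path dicts, while predict() takes a single path dict. `baseline` stands in for an already-constructed GaussianMLPBaseline; its constructor arguments are not shown in this excerpt, so the calls are left commented.

import numpy as np

paths = [
    dict(observations=np.random.randn(100, 4), returns=np.random.randn(100)),
    dict(observations=np.random.randn(80, 4), returns=np.random.randn(80)),
]
# baseline.fit(paths)                  # fit on a batch of paths
# values = baseline.predict(paths[0])  # per-timestep value estimates, shape (100,)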
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_predict']
del new_dict['_x_mean']
del new_dict['_x_std']
del new_dict['_y_mean']
del new_dict['_y_std']
del new_dict['_old_network']
return new_dict
| __getstate__ | python | rlworkgroup/garage | src/garage/tf/baselines/gaussian_mlp_baseline.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/baselines/gaussian_mlp_baseline.py | MIT |

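A self-contained sketch of the pickling pattern used by __getstate__ above: handles that cannot be pickled (compiled TF functions, network objects) are dropped from the state dict and rebuilt after unpickling. The class and attribute names below are illustrative, not garage's.

class PicklableWrapper:
    def __init__(self):
        self._params = {'w': [0.1, 0.2]}   # plain, picklable data
        self._f_predict = lambda x: x      # stand-in for an unpicklable handle

    def __getstate__(self):
        state = self.__dict__.copy()
        del state['_f_predict']            # drop the unpicklable handle
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self._f_predict = lambda x: x      # rebuild the handle on unpickling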
def network_output_spec(self):
"""Network output spec.
Return:
list[str]: List of key(str) for the network outputs.
"""
return [
'normalized_dist', 'normalized_mean', 'normalized_log_std', 'dist',
'mean', 'log_std', 'x_mean', 'x_std', 'y_mean', 'y_std'
]
| network_output_spec | python | rlworkgroup/garage | src/garage/tf/baselines/gaussian_mlp_baseline_model.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/baselines/gaussian_mlp_baseline_model.py | MIT |

def _build(self, state_input, name=None):
"""Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Place holder for state input.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tfp.distributions.MultivariateNormalDiag: Normalized distribution.
tf.Tensor: Normalized mean.
tf.Tensor: Normalized log_std.
tfp.distributions.MultivariateNormalDiag: Vanilla distribution.
tf.Tensor: Vanilla mean.
tf.Tensor: Vanilla log_std.
tf.Tensor: Mean for data.
tf.Tensor: log_std for data.
tf.Tensor: Mean for label.
tf.Tensor: log_std for label.
"""
with tf.compat.v1.variable_scope('normalized_vars'):
x_mean_var = tf.compat.v1.get_variable(
name='x_mean',
shape=(1, ) + self._input_shape,
dtype=np.float32,
initializer=tf.zeros_initializer(),
trainable=False)
x_std_var = tf.compat.v1.get_variable(
name='x_std_var',
shape=(1, ) + self._input_shape,
dtype=np.float32,
initializer=tf.ones_initializer(),
trainable=False)
y_mean_var = tf.compat.v1.get_variable(
name='y_mean_var',
shape=(1, self._output_dim),
dtype=np.float32,
initializer=tf.zeros_initializer(),
trainable=False)
y_std_var = tf.compat.v1.get_variable(
name='y_std_var',
shape=(1, self._output_dim),
dtype=np.float32,
initializer=tf.ones_initializer(),
trainable=False)
normalized_xs_var = (state_input - x_mean_var) / x_std_var
_, normalized_dist_mean, normalized_dist_log_std = super()._build(
normalized_xs_var)
# Since regressor expects [N, *dims], we need to squeeze the extra
# dimension
normalized_dist_log_std = tf.squeeze(normalized_dist_log_std, 1)
with tf.name_scope('mean_network'):
means_var = normalized_dist_mean * y_std_var + y_mean_var
with tf.name_scope('std_network'):
log_stds_var = normalized_dist_log_std + tf.math.log(y_std_var)
normalized_dist = tfp.distributions.MultivariateNormalDiag(
loc=normalized_dist_mean,
scale_diag=tf.exp(normalized_dist_log_std))
vanilla_dist = tfp.distributions.MultivariateNormalDiag(
loc=means_var, scale_diag=tf.exp(log_stds_var))
return (normalized_dist, normalized_dist_mean, normalized_dist_log_std,
vanilla_dist, means_var, log_stds_var, x_mean_var, x_std_var,
y_mean_var, y_std_var)
| _build | python | rlworkgroup/garage | src/garage/tf/baselines/gaussian_mlp_baseline_model.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/baselines/gaussian_mlp_baseline_model.py | MIT |

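A numpy sketch of the normalization arithmetic in _build() above: inputs are whitened with x_mean/x_std, and the network's normalized outputs are mapped back through y_mean/y_std. The shapes and the linear "network" are illustrative assumptions.

import numpy as np

x = np.random.randn(32, 4)
x_mean = x.mean(axis=0, keepdims=True)
x_std = x.std(axis=0, keepdims=True) + 1e-8
y_mean, y_std = np.array([[2.0]]), np.array([[3.0]])

normalized_x = (x - x_mean) / x_std
normalized_mean = normalized_x @ np.random.randn(4, 1)   # stand-in for the mean head
normalized_log_std = np.zeros((32, 1))                    # stand-in for the log_std head

mean = normalized_mean * y_std + y_mean                   # un-normalize the mean
log_std = normalized_log_std + np.log(y_std)              # shift log_std by log(y_std)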
def get_latent(self, input_value):
"""Get a sample of embedding for the given input.
Args:
input_value (numpy.ndarray): Tensor to encode.
Returns:
numpy.ndarray: An embedding sampled from embedding distribution.
dict: Embedding distribution information.
Note:
It returns an embedding and a dict, with keys
- mean (numpy.ndarray): Mean of the distribution.
- log_std (numpy.ndarray): Log standard deviation of the
distribution.
"""
| get_latent | python | rlworkgroup/garage | src/garage/tf/embeddings/encoder.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/embeddings/encoder.py | MIT |

def get_latents(self, input_values):
"""Get samples of embedding for the given inputs.
Args:
input_values (numpy.ndarray): Tensors to encode.
Returns:
numpy.ndarray: Embeddings sampled from embedding distribution.
dict: Embedding distribution information.
Note:
It returns an embedding and a dict, with keys
- mean (list[numpy.ndarray]): Means of the distribution.
- log_std (list[numpy.ndarray]): Log standard deviations of the
distribution.
"""
| get_latents | python | rlworkgroup/garage | src/garage/tf/embeddings/encoder.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/embeddings/encoder.py | MIT |

def build(self, embedding_input, name=None):
"""Build encoder.
After build, self.distribution is a Gaussian distribution conditioned
on embedding_input.
Args:
embedding_input (tf.Tensor) : Embedding input.
name (str): Name of the model, which is also the name scope.
"""
| build | python | rlworkgroup/garage | src/garage/tf/embeddings/encoder.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/embeddings/encoder.py | MIT |

def build(self, embedding_input, name=None):
"""Build encoder.
Args:
embedding_input (tf.Tensor) : Embedding input.
name (str): Name of the model, which is also the name scope.
Returns:
tfp.distributions.MultivariateNormalDiag: Distribution.
tf.Tensor: Mean.
tf.Tensor: Log of standard deviation.
"""
with tf.compat.v1.variable_scope(self._variable_scope):
return self.model.build(embedding_input, name=name)
| build | python | rlworkgroup/garage | src/garage/tf/embeddings/gaussian_mlp_encoder.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/embeddings/gaussian_mlp_encoder.py | MIT |

def get_latent(self, input_value):
"""Get a sample of embedding for the given input.
Args:
input_value (numpy.ndarray): Tensor to encode.
Returns:
numpy.ndarray: An embedding sampled from embedding distribution.
dict: Embedding distribution information.
Note:
It returns an embedding and a dict, with keys
- mean (numpy.ndarray): Mean of the distribution.
- log_std (numpy.ndarray): Log standard deviation of the
distribution.
"""
flat_input = self._embedding_spec.input_space.flatten(input_value)
sample, mean, log_std = self._f_dist(np.expand_dims([flat_input], 1))
sample = self._embedding_spec.output_space.unflatten(
np.squeeze(sample, 1)[0])
mean = self._embedding_spec.output_space.unflatten(
np.squeeze(mean, 1)[0])
log_std = self._embedding_spec.output_space.unflatten(
np.squeeze(log_std, 1)[0])
return sample, dict(mean=mean, log_std=log_std)
| get_latent | python | rlworkgroup/garage | src/garage/tf/embeddings/gaussian_mlp_encoder.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/embeddings/gaussian_mlp_encoder.py | MIT |

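A numpy sketch of the shape handling in get_latent() above: the flattened input gains batch and time axes before the call, and the outputs drop the time axis again. The input and latent dimensions are assumptions for illustration.

import numpy as np

flat_input = np.random.randn(6)               # flattened observation
batched = np.expand_dims([flat_input], 1)     # shape (1, 1, 6): batch and time axes
latent_dim = 3
samples = np.random.randn(1, 1, latent_dim)   # stand-in for the _f_dist output
sample = np.squeeze(samples, 1)[0]            # back to shape (latent_dim,)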
def get_latents(self, input_values):
"""Get samples of embedding for the given inputs.
Args:
input_values (numpy.ndarray): Tensors to encode.
Returns:
numpy.ndarray: Embeddings sampled from embedding distribution.
dict: Embedding distribution information.
Note:
It returns an embedding and a dict, with keys
- mean (list[numpy.ndarray]): Means of the distribution.
- log_std (list[numpy.ndarray]): Log standard deviations of the
distribution.
"""
flat_input = self._embedding_spec.input_space.flatten_n(input_values)
samples, means, log_stds = self._f_dist(np.expand_dims(flat_input, 1))
samples = self._embedding_spec.output_space.unflatten_n(
np.squeeze(samples, 1))
means = self._embedding_spec.output_space.unflatten_n(
np.squeeze(means, 1))
log_stds = self._embedding_spec.output_space.unflatten_n(
np.squeeze(log_stds, 1))
return samples, dict(mean=means, log_std=log_stds)
| get_latents | python | rlworkgroup/garage | src/garage/tf/embeddings/gaussian_mlp_encoder.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/embeddings/gaussian_mlp_encoder.py | MIT |

def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_dist']
del new_dict['_network']
return new_dict
| __getstate__ | python | rlworkgroup/garage | src/garage/tf/embeddings/gaussian_mlp_encoder.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/embeddings/gaussian_mlp_encoder.py | MIT |

def _build(self, state_input, name=None):
"""Build model.
Args:
state_input (tf.Tensor): Observation inputs.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Returns:
tfp.distributions.OneHotCategorical: Policy distribution.
"""
if self._is_image:
augmented_state_input = tf.cast(state_input, tf.float32)
augmented_state_input /= 255.0
else:
augmented_state_input = state_input
time_dim = tf.shape(augmented_state_input)[1]
dim = augmented_state_input.get_shape()[2:].as_list()
augmented_state_input = tf.reshape(augmented_state_input, [-1, *dim])
cnn_output = self._cnn_model.build(augmented_state_input,
name=name).outputs
dim = cnn_output.get_shape()[-1]
cnn_output = tf.reshape(cnn_output, [-1, time_dim, dim])
mlp_output = self._mlp_model.build(cnn_output, name=name).dist
return mlp_output
| _build | python | rlworkgroup/garage | src/garage/tf/models/categorical_cnn_model.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/categorical_cnn_model.py | MIT |

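A numpy sketch of the reshaping in the categorical CNN _build() above: a (N, T, H, W, C) observation batch is flattened to (N*T, H, W, C) for the CNN, and the time axis is restored before the MLP/distribution. All dimensions are illustrative.

import numpy as np

N, T, H, W, C = 2, 5, 8, 8, 3
obs = np.random.rand(N, T, H, W, C)
flat = obs.reshape(-1, H, W, C)                     # (N*T, H, W, C) fed to the CNN
feat_dim = 16
cnn_out = np.random.rand(flat.shape[0], feat_dim)   # stand-in for CNN features
seq_out = cnn_out.reshape(N, T, feat_dim)           # time axis restored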
def _build(self, state_input, step_input, step_hidden, name=None):
"""Build model.
Args:
state_input (tf.Tensor): Full observation input, with shape
:math:`(N, T, S^*)`.
step_input (tf.Tensor): Step observation input, with shape
:math:`(N, S^*)`.
step_hidden (tf.Tensor): Hidden state for step, with shape
:math:`(N, S^*)`.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Returns:
tfp.distributions.OneHotCategorical: Policy distribution.
tf.Tensor: Step output, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`.
tf.Tensor: Initial hidden state, used to reset the hidden state
when policy resets. Shape: :math:`(S^*)`.
"""
outputs, step_output, step_hidden, init_hidden = super()._build(
state_input, step_input, step_hidden, name=name)
dist = tfp.distributions.OneHotCategorical(probs=outputs)
return dist, step_output, step_hidden, init_hidden
| _build | python | rlworkgroup/garage | src/garage/tf/models/categorical_gru_model.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/categorical_gru_model.py | MIT |

def network_output_spec(self):
"""Network output spec.
Returns:
list[str]: Name of the model outputs, in order.
"""
return [
'dist', 'step_output', 'step_hidden', 'step_cell', 'init_hidden',
'init_cell'
]
| network_output_spec | python | rlworkgroup/garage | src/garage/tf/models/categorical_lstm_model.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/categorical_lstm_model.py | MIT |

def _build(self,
state_input,
step_input,
step_hidden,
step_cell,
name=None):
"""Build model.
Args:
state_input (tf.Tensor): Entire time-series observation input,
with shape :math:`(N, T, S^*)`.
step_input (tf.Tensor): Single timestep observation input,
with shape :math:`(N, S^*)`.
step_hidden (tf.Tensor): Hidden state for step, with shape
:math:`(N, S^*)`.
step_cell (tf.Tensor): Cell state for step, with shape
:math:`(N, S^*)`.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Returns:
tfp.distributions.OneHotCategorical: Policy distribution.
tf.Tensor: Step output, with shape :math:`(N, S^*)`
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`
tf.Tensor: Step cell state, with shape :math:`(N, S^*)`
tf.Tensor: Initial hidden state, used to reset the hidden state
when policy resets. Shape: :math:`(S^*)`
tf.Tensor: Initial cell state, used to reset the cell state
when policy resets. Shape: :math:`(S^*)`
"""
(outputs, step_output, step_hidden, step_cell, init_hidden,
init_cell) = super()._build(state_input,
step_input,
step_hidden,
step_cell,
name=name)
dist = tfp.distributions.OneHotCategorical(probs=outputs)
return (dist, step_output, step_hidden, step_cell, init_hidden,
init_cell)
| _build | python | rlworkgroup/garage | src/garage/tf/models/categorical_lstm_model.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/categorical_lstm_model.py | MIT |

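A small sketch of wrapping per-action probabilities in a OneHotCategorical, as both recurrent categorical models above do; the probabilities here are made up.

import tensorflow_probability as tfp

probs = [[0.2, 0.5, 0.3]]                            # assumed action probabilities
dist = tfp.distributions.OneHotCategorical(probs=probs)
action = dist.sample()                               # one-hot sample, shape (1, 3)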
def cnn(input_var,
input_dim,
filters,
strides,
name,
padding,
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer()):
"""Convolutional neural network (CNN).
Note:
Based on 'NHWC' data format: [batch, height, width, channel].
Args:
input_var (tf.Tensor): Input tf.Tensor to the CNN.
input_dim (Tuple[int, int, int]): Dimensions of unflattened input,
which means [in_height, in_width, in_channels]. If the last 3
dimensions of input_var are not this shape, it will be reshaped.
filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and dimension
of filters. For example, ((3, (3, 5)), (32, (3, 3))) means there
are two convolutional layers. The filter for the first layer has 3
channels and its shape is (3 x 5), while the filter for the second
layer has 32 channels and its shape is (3 x 3).
strides (tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for the first layer is 1 and that of the second layer is 2.
name (str): Network name, also the variable scope.
padding (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
Return:
tf.Tensor: The output tf.Tensor of the CNN.
"""
with tf.compat.v1.variable_scope(name):
# unflatten
input_var = tf.reshape(input_var, [-1, *input_dim])
h = input_var
for index, (filter_iter, stride) in enumerate(zip(filters, strides)):
_stride = [1, stride, stride, 1]
h = _conv(h, 'h{}'.format(index), filter_iter[1], filter_iter[0],
_stride, hidden_w_init, hidden_b_init, padding)
if hidden_nonlinearity is not None:
h = hidden_nonlinearity(h)
# flatten
dim = tf.reduce_prod(h.get_shape()[1:].as_list())
return tf.reshape(h, [-1, dim])
| cnn | python | rlworkgroup/garage | src/garage/tf/models/cnn.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/cnn.py | MIT |

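A pure-Python sketch of how the padding argument of cnn() affects output spatial size, using the standard convolution output-size formulas; the example numbers are arbitrary.

import math

def conv_out_size(in_size, filter_size, stride, padding):
    if padding == 'SAME':
        return math.ceil(in_size / stride)
    if padding == 'VALID':
        return math.ceil((in_size - filter_size + 1) / stride)
    raise ValueError(padding)

print(conv_out_size(28, 3, 2, 'SAME'))   # 14
print(conv_out_size(28, 3, 2, 'VALID'))  # 13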
def cnn_with_max_pooling(input_var,
input_dim,
filters,
strides,
name,
pool_shapes,
pool_strides,
padding,
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer()):
"""Convolutional neural network (CNN) with max-pooling.
Note:
Based on 'NHWC' data format: [batch, height, width, channel].
Args:
input_var (tf.Tensor): Input tf.Tensor to the CNN.
input_dim (Tuple[int, int, int]): Dimensions of unflattened input,
which means [in_height, in_width, in_channels]. If the last 3
dimensions of input_var are not this shape, it will be reshaped.
filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and dimension
of filters. For example, ((3, (3, 5)), (32, (3, 3))) means there
are two convolutional layers. The filter for the first layer has 3
channels and its shape is (3 x 5), while the filter for the second
layer has 32 channels and its shape is (3 x 3).
strides (tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for the first layer is 1 and that of the second layer is 2.
name (str): Model name, also the variable scope of the cnn.
pool_shapes (tuple[int]): Dimension of the pooling layer(s). For
example, (2, 2) means that all the pooling layers have
shape (2, 2).
pool_strides (tuple[int]): The strides of the pooling layer(s). For
example, (2, 2) means that all the pooling layers have
strides (2, 2).
padding (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
Return:
tf.Tensor: The output tf.Tensor of the CNN.
"""
pool_strides = [1, pool_strides[0], pool_strides[1], 1]
pool_shapes = [1, pool_shapes[0], pool_shapes[1], 1]
with tf.compat.v1.variable_scope(name):
# unflatten
input_var = tf.reshape(input_var, [-1, *input_dim])
h = input_var
for index, (filter_iter, stride) in enumerate(zip(filters, strides)):
_stride = [1, stride, stride, 1]
h = _conv(h, 'h{}'.format(index), filter_iter[1], filter_iter[0],
_stride, hidden_w_init, hidden_b_init, padding)
if hidden_nonlinearity is not None:
h = hidden_nonlinearity(h)
h = tf.nn.max_pool2d(h,
ksize=pool_shapes,
strides=pool_strides,
padding=padding)
# flatten
dim = tf.reduce_prod(h.get_shape()[1:].as_list())
return tf.reshape(h, [-1, dim])
| cnn_with_max_pooling | python | rlworkgroup/garage | src/garage/tf/models/cnn.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/cnn.py | MIT |

def _conv(input_var, name, filter_size, num_filter, strides, hidden_w_init,
hidden_b_init, padding):
"""Helper function for performing convolution.
Args:
input_var (tf.Tensor): Input tf.Tensor to the CNN.
name (str): Variable scope of the convolution Op.
filter_size (tuple[int]): Dimension of the filter. For example,
(3, 5) means the dimension of the filter is (3 x 5).
num_filter (int): Number of filters (output channels) of this
convolution. For example, 32 means the convolution produces a
feature map with 32 channels.
strides (tuple[int]): Strides passed to tf.nn.conv2d, in the form
[1, stride, stride, 1].
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
padding (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
Return:
tf.Tensor: The output of the convolution.
"""
# channel from input
input_shape = input_var.get_shape()[-1]
# [filter_height, filter_width, in_channels, out_channels]
w_shape = [filter_size[0], filter_size[1], input_shape, num_filter]
b_shape = [1, 1, 1, num_filter]
with tf.compat.v1.variable_scope(name):
weight = tf.compat.v1.get_variable('weight',
w_shape,
initializer=hidden_w_init)
bias = tf.compat.v1.get_variable('bias',
b_shape,
initializer=hidden_b_init)
return tf.nn.conv2d(
input_var, weight, strides=strides, padding=padding) + bias
| _conv | python | rlworkgroup/garage | src/garage/tf/models/cnn.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/cnn.py | MIT |

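A short TF2 sketch of the convolution wrapped by _conv() above: the kernel shape is [filter_height, filter_width, in_channels, out_channels] and the bias broadcasts over the spatial dimensions. The sizes are illustrative.

import tensorflow as tf

x = tf.random.normal([1, 8, 8, 3])       # NHWC input
w = tf.random.normal([3, 3, 3, 16])      # [fh, fw, in_channels, out_channels]
b = tf.zeros([1, 1, 1, 16])              # broadcastable bias
y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME') + b
print(y.shape)                           # (1, 8, 8, 16)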
def _build(self, state, action, name=None):
"""Build the model and return the outputs.
This builds the model such that the output of the CNN is fed
to the MLP. The CNN receives the state as the input. The MLP
receives two inputs, the output of the CNN and the action
tensor.
Args:
state (tf.Tensor): State placeholder tensor of shape
:math:`(N, O*)`.
action (tf.Tensor): Action placeholder tensor of shape
:math:`(N, A*)`.
name (str): Name of the model.
Returns:
tf.Tensor: Output of the model of shape (N, output_dim).
"""
cnn_out = self.cnn_model.build(state, name=name).outputs
mlp_out = self.mlp_merge_model.build(cnn_out, action,
name=name).outputs
return mlp_out
| _build | python | rlworkgroup/garage | src/garage/tf/models/cnn_mlp_merge_model.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/cnn_mlp_merge_model.py | MIT |

def _build(self, state_input, name=None):
"""Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
del name
return cnn(input_var=state_input,
input_dim=self._input_dim,
filters=self._filters,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
strides=self._strides,
padding=self._padding,
name='cnn')
| _build | python | rlworkgroup/garage | src/garage/tf/models/cnn_model.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/cnn_model.py | MIT |

def _build(self, state_input, name=None):
"""Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
del name
return cnn_with_max_pooling(
input_var=state_input,
input_dim=self._input_dim,
filters=self._filters,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
strides=self._strides,
padding=self._padding,
pool_shapes=self._pool_shapes,
pool_strides=self._pool_strides,
name='cnn')
| _build | python | rlworkgroup/garage | src/garage/tf/models/cnn_model_max_pooling.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/cnn_model_max_pooling.py | MIT |

def _build(self, state_input, name=None):
"""Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Place holder for state input.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Sampled action.
tf.Tensor: Mean.
tf.Tensor: Parameterized log_std.
tf.Tensor: log_std.
tfp.distributions.MultivariateNormalDiag: Distribution.
"""
del name
action_dim = self._output_dim
with tf.compat.v1.variable_scope('dist_params'):
if self._std_share_network:
# mean and std networks share a CNN
b = np.concatenate([
np.zeros(action_dim),
np.full(action_dim, self._init_std_param)
], axis=0) # yapf: disable
mean_std_conv = cnn(
input_var=state_input,
input_dim=self._input_dim,
filters=self._filters,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
strides=self._strides,
padding=self._padding,
name='mean_std_cnn')
mean_std_network = mlp(
mean_std_conv,
output_dim=action_dim * 2,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=tf.constant_initializer(b),
name='mean_std_network',
layer_normalization=self._layer_normalization)
with tf.compat.v1.variable_scope('mean_network'):
mean_network = mean_std_network[..., :action_dim]
with tf.compat.v1.variable_scope('log_std_network'):
log_std_network = mean_std_network[..., action_dim:]
else:
# separate MLPs for mean and std networks
# mean network
mean_conv = cnn(input_var=state_input,
input_dim=self._input_dim,
filters=self._filters,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
strides=self._strides,
padding=self._padding,
name='mean_cnn')
mean_network = mlp(
mean_conv,
output_dim=action_dim,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
name='mean_network',
layer_normalization=self._layer_normalization)
# std network
if self._adaptive_std:
log_std_conv = cnn(
input_var=state_input,
input_dim=self._input_dim,
filters=self._std_filters,
hidden_nonlinearity=self._std_hidden_nonlinearity,
hidden_w_init=self._std_hidden_w_init,
hidden_b_init=self._std_hidden_b_init,
strides=self._std_strides,
padding=self._std_padding,
name='log_std_cnn')
log_std_network = mlp(
log_std_conv,
output_dim=action_dim,
hidden_sizes=self._std_hidden_sizes,
hidden_nonlinearity=self._std_hidden_nonlinearity,
hidden_w_init=self._std_hidden_w_init,
hidden_b_init=self._std_hidden_b_init,
output_nonlinearity=self._std_output_nonlinearity,
output_w_init=self._std_output_w_init,
output_b_init=tf.constant_initializer(
self._init_std_param),
name='log_std_network',
layer_normalization=self._layer_normalization)
else:
log_std_network = parameter(
input_var=state_input,
length=action_dim,
initializer=tf.constant_initializer(
self._init_std_param),
trainable=self._learn_std,
name='log_std_network')
mean_var = mean_network
std_param = log_std_network
with tf.compat.v1.variable_scope('std_limits'):
if self._min_std_param is not None:
std_param = tf.maximum(std_param, self._min_std_param)
if self._max_std_param is not None:
std_param = tf.minimum(std_param, self._max_std_param)
with tf.compat.v1.variable_scope('std_parameterization'):
# build std_var with std parameterization
if self._std_parameterization == 'exp':
log_std_var = std_param
else: # we know it must be softplus here
log_std_var = tf.math.log(tf.math.log(1. + tf.exp(std_param)))
dist = tfp.distributions.MultivariateNormalDiag(
loc=mean_var, scale_diag=tf.exp(log_std_var))
rnd = tf.random.normal(shape=mean_var.get_shape().as_list()[1:],
seed=deterministic.get_tf_seed_stream())
action_var = rnd * tf.exp(log_std_var) + mean_var
return action_var, mean_var, log_std_var, std_param, dist
| _build | python | rlworkgroup/garage | src/garage/tf/models/gaussian_cnn_model.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/gaussian_cnn_model.py | MIT |

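A numpy sketch of the two std parameterizations handled at the end of _build() above: with 'exp' the parameter is already log(std), while with 'softplus' the std is softplus(param), so its log is log(log(1 + exp(param))).

import numpy as np

std_param = np.array([-1.0, 0.0, 1.0])

log_std_exp = std_param                                  # 'exp': param is log(std)
log_std_softplus = np.log(np.log1p(np.exp(std_param)))   # 'softplus': log(softplus(param))

std_exp = np.exp(log_std_exp)
std_softplus = np.exp(log_std_softplus)                  # equals softplus(std_param)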
def network_output_spec(self):
"""Network output spec.
Returns:
list[str]: Name of the model outputs, in order.
"""
return [
'dist', 'step_mean', 'step_log_std', 'step_hidden', 'init_hidden'
]
| network_output_spec | python | rlworkgroup/garage | src/garage/tf/models/gaussian_gru_model.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/gaussian_gru_model.py | MIT |

def _build(self, state_input, step_input, step_hidden, name=None):
"""Build model.
Args:
state_input (tf.Tensor): Entire time-series observation input,
with shape :math:`(N, T, S^*)`.
step_input (tf.Tensor): Single timestep observation input,
with shape :math:`(N, S^*)`.
step_hidden (tf.Tensor): Hidden state for step, with shape
:math:`(N, S^*)`.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Returns:
tfp.distributions.MultivariateNormalDiag: Policy distribution.
tf.Tensor: Step means, with shape :math:`(N, S^*)`.
tf.Tensor: Step log std, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`.
tf.Tensor: Initial hidden state, with shape :math:`(S^*)`.
"""
del name
action_dim = self._output_dim
with tf.compat.v1.variable_scope('dist_params'):
if self._std_share_network:
# mean and std networks share a GRU
(outputs, step_outputs, step_hidden, hidden_init_var) = gru(
name='mean_std_network',
gru_cell=self._mean_std_gru_cell,
all_input_var=state_input,
step_input_var=step_input,
step_hidden_var=step_hidden,
hidden_state_init=self._hidden_state_init,
hidden_state_init_trainable=self.
_hidden_state_init_trainable,
output_nonlinearity_layer=self.
_mean_std_output_nonlinearity_layer)
with tf.compat.v1.variable_scope('mean_network'):
mean_var = outputs[..., :action_dim]
step_mean_var = step_outputs[..., :action_dim]
with tf.compat.v1.variable_scope('log_std_network'):
log_std_var = outputs[..., action_dim:]
step_log_std_var = step_outputs[..., action_dim:]
else:
# separate MLPs for mean and std networks
# mean network
(mean_var, step_mean_var, step_hidden, hidden_init_var) = gru(
name='mean_network',
gru_cell=self._mean_gru_cell,
all_input_var=state_input,
step_input_var=step_input,
step_hidden_var=step_hidden,
hidden_state_init=self._hidden_state_init,
hidden_state_init_trainable=self.
_hidden_state_init_trainable,
output_nonlinearity_layer=self.
_mean_output_nonlinearity_layer)
log_std_var, step_log_std_var = recurrent_parameter(
input_var=state_input,
step_input_var=step_input,
length=action_dim,
initializer=tf.constant_initializer(self._init_std_param),
trainable=self._learn_std,
name='log_std_param')
dist = tfp.distributions.MultivariateNormalDiag(
loc=mean_var, scale_diag=tf.exp(log_std_var))
return (dist, step_mean_var, step_log_std_var, step_hidden,
hidden_init_var)
| _build | python | rlworkgroup/garage | src/garage/tf/models/gaussian_gru_model.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/gaussian_gru_model.py | MIT |

def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_mean_std_gru_cell']
del new_dict['_mean_gru_cell']
del new_dict['_mean_std_output_nonlinearity_layer']
del new_dict['_mean_output_nonlinearity_layer']
return new_dict
| __getstate__ | python | rlworkgroup/garage | src/garage/tf/models/gaussian_gru_model.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/gaussian_gru_model.py | MIT |

def network_input_spec(self):
"""Network input spec.
Returns:
list[str]: Name of the model inputs, in order.
"""
return [
'full_input', 'step_input', 'step_hidden_input', 'step_cell_input'
]
| network_input_spec | python | rlworkgroup/garage | src/garage/tf/models/gaussian_lstm_model.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/gaussian_lstm_model.py | MIT |

def network_output_spec(self):
"""Network output spec.
Returns:
list[str]: Name of the model outputs, in order.
"""
return [
'dist', 'step_mean', 'step_log_std', 'step_hidden', 'step_cell',
'init_hidden', 'init_cell'
]
| network_output_spec | python | rlworkgroup/garage | src/garage/tf/models/gaussian_lstm_model.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/gaussian_lstm_model.py | MIT |

def _build(self,
state_input,
step_input,
step_hidden,
step_cell,
name=None):
"""Build model.
Args:
state_input (tf.Tensor): Entire time-series observation input,
with shape :math:`(N, T, S^*)`.
step_input (tf.Tensor): Single timestep observation input,
with shape :math:`(N, S^*)`.
step_hidden (tf.Tensor): Hidden state for step, with shape
:math:`(N, S^*)`.
step_cell (tf.Tensor): Cell state for step, with shape
:math:`(N, S^*)`.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Returns:
tfp.distributions.MultivariateNormalDiag: Policy distribution.
tf.Tensor: Step means, with shape :math:`(N, S^*)`.
tf.Tensor: Step log std, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`.
tf.Tensor: Step cell state, with shape :math:`(N, S^*)`.
tf.Tensor: Initial hidden state, with shape :math:`(S^*)`.
tf.Tensor: Initial cell state, with shape :math:`(S^*)`
"""
del name
action_dim = self._output_dim
with tf.compat.v1.variable_scope('dist_params'):
if self._std_share_network:
# mean and std networks share an LSTM
(outputs, step_outputs, step_hidden, step_cell,
hidden_init_var, cell_init_var) = lstm(
name='mean_std_network',
lstm_cell=self._mean_std_lstm_cell,
all_input_var=state_input,
step_input_var=step_input,
step_hidden_var=step_hidden,
step_cell_var=step_cell,
hidden_state_init=self._hidden_state_init,
hidden_state_init_trainable=self.
_hidden_state_init_trainable,
cell_state_init=self._cell_state_init,
cell_state_init_trainable=self._cell_state_init_trainable,
output_nonlinearity_layer=self.
_mean_std_output_nonlinearity_layer)
with tf.compat.v1.variable_scope('mean_network'):
mean_var = outputs[..., :action_dim]
step_mean_var = step_outputs[..., :action_dim]
with tf.compat.v1.variable_scope('log_std_network'):
log_std_var = outputs[..., action_dim:]
step_log_std_var = step_outputs[..., action_dim:]
else:
# separate MLPs for mean and std networks
# mean network
(mean_var, step_mean_var, step_hidden, step_cell,
hidden_init_var, cell_init_var) = lstm(
name='mean_network',
lstm_cell=self._mean_lstm_cell,
all_input_var=state_input,
step_input_var=step_input,
step_hidden_var=step_hidden,
step_cell_var=step_cell,
hidden_state_init=self._hidden_state_init,
hidden_state_init_trainable=self.
_hidden_state_init_trainable,
cell_state_init=self._cell_state_init,
cell_state_init_trainable=self._cell_state_init_trainable,
output_nonlinearity_layer=self.
_mean_output_nonlinearity_layer)
log_std_var, step_log_std_var = recurrent_parameter(
input_var=state_input,
step_input_var=step_input,
length=action_dim,
initializer=tf.constant_initializer(self._init_std_param),
trainable=self._learn_std,
name='log_std_param')
dist = tfp.distributions.MultivariateNormalDiag(
loc=mean_var, scale_diag=tf.exp(log_std_var))
return (dist, step_mean_var, step_log_std_var, step_hidden, step_cell,
hidden_init_var, cell_init_var)
| _build | python | rlworkgroup/garage | src/garage/tf/models/gaussian_lstm_model.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/gaussian_lstm_model.py | MIT |

def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_mean_std_lstm_cell']
del new_dict['_mean_lstm_cell']
del new_dict['_mean_std_output_nonlinearity_layer']
del new_dict['_mean_output_nonlinearity_layer']
return new_dict
| __getstate__ | python | rlworkgroup/garage | src/garage/tf/models/gaussian_lstm_model.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/gaussian_lstm_model.py | MIT |

def _build(self, state_input, name=None):
"""Build model.
Args:
state_input (tf.Tensor): Entire time-series observation input.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Returns:
tfp.distributions.MultivariateNormalDiag: Distribution.
tf.Tensor: Mean.
tf.Tensor: Log of standard deviation.
"""
del name
action_dim = self._output_dim
with tf.compat.v1.variable_scope('dist_params'):
if self._std_share_network:
# mean and std networks share an MLP
b = np.concatenate([
np.zeros(action_dim),
np.full(action_dim, self._init_std_param)
], axis=0) # yapf: disable
mean_std_network = mlp(
state_input,
output_dim=action_dim * 2,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=tf.constant_initializer(b),
name='mean_std_network',
layer_normalization=self._layer_normalization)
with tf.compat.v1.variable_scope('mean_network'):
mean_network = mean_std_network[..., :action_dim]
with tf.compat.v1.variable_scope('log_std_network'):
log_std_network = mean_std_network[..., action_dim:]
else:
# separate MLPs for mean and std networks
# mean network
mean_network = mlp(
state_input,
output_dim=action_dim,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
name='mean_network',
layer_normalization=self._layer_normalization)
# std network
if self._adaptive_std:
log_std_network = mlp(
state_input,
output_dim=action_dim,
hidden_sizes=self._std_hidden_sizes,
hidden_nonlinearity=self._std_hidden_nonlinearity,
hidden_w_init=self._std_hidden_w_init,
hidden_b_init=self._std_hidden_b_init,
output_nonlinearity=self._std_output_nonlinearity,
output_w_init=self._std_output_w_init,
output_b_init=tf.constant_initializer(
self._init_std_param),
name='log_std_network',
layer_normalization=self._layer_normalization)
else:
log_std_network = parameter(
input_var=state_input,
length=action_dim,
initializer=tf.constant_initializer(
self._init_std_param),
trainable=self._learn_std,
name='log_std_network')
log_std_network = tf.expand_dims(log_std_network, 1)
mean_var = mean_network
std_param = log_std_network
with tf.compat.v1.variable_scope('std_limits'):
if self._min_std_param is not None:
std_param = tf.maximum(std_param, self._min_std_param)
if self._max_std_param is not None:
std_param = tf.minimum(std_param, self._max_std_param)
with tf.compat.v1.variable_scope('std_parameterization'):
# build std_var with std parameterization
if self._std_parameterization == 'exp':
log_std_var = std_param
else: # we know it must be softplus here
log_std_var = tf.math.log(tf.math.log(1. + tf.exp(std_param)))
return tfp.distributions.MultivariateNormalDiag(
loc=mean_var,
scale_diag=tf.exp(log_std_var)), mean_var, log_std_var
| _build | python | rlworkgroup/garage | src/garage/tf/models/gaussian_mlp_model.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/gaussian_mlp_model.py | MIT |

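A numpy sketch of the std_share_network slicing used above (and in the CNN, GRU, and LSTM variants): a single head of width 2 * action_dim is split into mean and log_std halves. Sizes are illustrative.

import numpy as np

action_dim = 3
mean_std_out = np.random.randn(10, 2 * action_dim)   # stand-in for the shared head
mean = mean_std_out[..., :action_dim]
log_std = mean_std_out[..., action_dim:]
print(mean.shape, log_std.shape)                      # (10, 3) (10, 3)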
def gru(name,
gru_cell,
all_input_var,
step_input_var,
step_hidden_var,
output_nonlinearity_layer,
hidden_state_init=tf.zeros_initializer(),
hidden_state_init_trainable=False):
r"""Gated Recurrent Unit (GRU).
Args:
name (str): Name of the variable scope.
gru_cell (tf.keras.layers.Layer): GRU cell used to generate
outputs.
all_input_var (tf.Tensor): Place holder for entire time-series inputs,
with shape :math:`(N, T, S^*)`.
step_input_var (tf.Tensor): Place holder for step inputs, with shape
:math:`(N, S^*)`.
step_hidden_var (tf.Tensor): Place holder for step hidden state, with
shape :math:`(N, H)`.
output_nonlinearity_layer (callable): Activation function for output
dense layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
hidden_state_init (callable): Initializer function for the
initial hidden state. The function should return a tf.Tensor.
hidden_state_init_trainable (bool): Bool for whether the initial
hidden state is trainable.
Return:
tf.Tensor: Entire time-series outputs, with shape :math:`(N, T, S^*)`.
tf.Tensor: Step output, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, H)`
tf.Tensor: Initial hidden state, with shape :math:`(H, )`
"""
with tf.compat.v1.variable_scope(name):
hidden_dim = gru_cell.units
output, [hidden] = gru_cell(step_input_var, states=[step_hidden_var])
output = output_nonlinearity_layer(output)
hidden_init_var = tf.compat.v1.get_variable(
name='initial_hidden',
shape=(hidden_dim, ),
initializer=hidden_state_init,
trainable=hidden_state_init_trainable,
dtype=tf.float32)
hidden_init_var_b = tf.broadcast_to(
hidden_init_var, shape=[tf.shape(all_input_var)[0], hidden_dim])
rnn = tf.keras.layers.RNN(gru_cell, return_sequences=True)
hs = rnn(all_input_var, initial_state=hidden_init_var_b)
outputs = output_nonlinearity_layer(hs)
return outputs, output, hidden, hidden_init_var
| gru | python | rlworkgroup/garage | src/garage/tf/models/gru.py | https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/gru.py | MIT |

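A TF2/Keras sketch of the two call paths used by gru() above: the cell applied to a single step with an explicit hidden state, and the same cell wrapped in tf.keras.layers.RNN to unroll a full sequence. Dimensions are illustrative.

import tensorflow as tf

units, batch, steps, feat = 8, 4, 6, 3
cell = tf.keras.layers.GRUCell(units)

step_input = tf.random.normal([batch, feat])
step_hidden = tf.zeros([batch, units])
output, new_states = cell(step_input, states=[step_hidden])    # single-step path

seq_input = tf.random.normal([batch, steps, feat])
rnn = tf.keras.layers.RNN(cell, return_sequences=True)
seq_output = rnn(seq_input, initial_state=step_hidden)         # full-sequence path
print(output.shape, seq_output.shape)                          # (4, 8) (4, 6, 8)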
def _build(self,
all_input_var,
step_input_var,
step_hidden_var,
name=None):
"""Build model given input placeholder(s).
Args:
all_input_var (tf.Tensor): Place holder for entire time-series
inputs.
step_input_var (tf.Tensor): Place holder for step inputs.
step_hidden_var (tf.Tensor): Place holder for step hidden state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Entire time-series outputs.
tf.Tensor: Step output.
tf.Tensor: Step hidden state.
tf.Tensor: Initial hidden state.
"""
del name
return gru(
name='gru',
gru_cell=self._gru_cell,
all_input_var=all_input_var,
step_input_var=step_input_var,
step_hidden_var=step_hidden_var,
hidden_state_init=self._hidden_state_init,
hidden_state_init_trainable=self._hidden_state_init_trainable,
output_nonlinearity_layer=self._output_nonlinearity_layer)
|
Build model given input placeholder(s).
Args:
all_input_var (tf.Tensor): Place holder for entire time-series
inputs.
step_input_var (tf.Tensor): Place holder for step inputs.
step_hidden_var (tf.Tensor): Place holder for step hidden state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Entire time-series outputs.
tf.Tensor: Step output.
tf.Tensor: Step hidden state.
tf.Tensor: Initial hidden state.
|
_build
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/gru_model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/gru_model.py
|
MIT
|
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_gru_cell']
del new_dict['_output_nonlinearity_layer']
return new_dict
|
Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/gru_model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/gru_model.py
|
MIT
|
def lstm(name,
lstm_cell,
all_input_var,
step_input_var,
step_hidden_var,
step_cell_var,
output_nonlinearity_layer,
hidden_state_init=tf.zeros_initializer(),
hidden_state_init_trainable=False,
cell_state_init=tf.zeros_initializer(),
cell_state_init_trainable=False):
r"""Long Short-Term Memory (LSTM).
Args:
name (str): Name of the variable scope.
lstm_cell (tf.keras.layers.Layer): LSTM cell used to generate
outputs.
        all_input_var (tf.Tensor): Place holder for entire time-series inputs,
with shape :math:`(N, T, S^*)`.
step_input_var (tf.Tensor): Place holder for step inputs, with shape
:math:`(N, S^*)`.
step_hidden_var (tf.Tensor): Place holder for step hidden state, with
shape :math:`(N, H)`.
step_cell_var (tf.Tensor): Place holder for cell state, with shape
:math:`(N, H)`.
output_nonlinearity_layer (callable): Activation function for output
dense layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
hidden_state_init (callable): Initializer function for the
            initial hidden state. The function should return a tf.Tensor.
hidden_state_init_trainable (bool): Bool for whether the initial
hidden state is trainable.
cell_state_init (callable): Initializer function for the
            initial cell state. The function should return a tf.Tensor.
cell_state_init_trainable (bool): Bool for whether the initial
cell state is trainable.
Return:
        tf.Tensor: Entire time-series outputs, with shape :math:`(N, T, S^*)`.
tf.Tensor: Step output, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, H)`.
tf.Tensor: Step cell state, with shape :math:`(N, H)`.
tf.Tensor: Initial hidden state, with shape :math:`(H, )`.
tf.Tensor: Initial cell state, with shape :math:`(H, )`.
"""
with tf.compat.v1.variable_scope(name):
hidden_dim = lstm_cell.units
output, [hidden,
cell] = lstm_cell(step_input_var,
states=(step_hidden_var, step_cell_var))
output = output_nonlinearity_layer(output)
hidden_init_var = tf.compat.v1.get_variable(
name='initial_hidden',
shape=(hidden_dim, ),
initializer=hidden_state_init,
trainable=hidden_state_init_trainable,
dtype=tf.float32)
cell_init_var = tf.compat.v1.get_variable(
name='initial_cell',
shape=(hidden_dim, ),
initializer=cell_state_init,
trainable=cell_state_init_trainable,
dtype=tf.float32)
hidden_init_var_b = tf.broadcast_to(
hidden_init_var, shape=[tf.shape(all_input_var)[0], hidden_dim])
cell_init_var_b = tf.broadcast_to(
cell_init_var, shape=[tf.shape(all_input_var)[0], hidden_dim])
rnn = tf.keras.layers.RNN(lstm_cell, return_sequences=True)
hs = rnn(all_input_var,
initial_state=[hidden_init_var_b, cell_init_var_b])
outputs = output_nonlinearity_layer(hs)
return outputs, output, hidden, cell, hidden_init_var, cell_init_var
|
Long Short-Term Memory (LSTM).
Args:
name (str): Name of the variable scope.
lstm_cell (tf.keras.layers.Layer): LSTM cell used to generate
outputs.
    all_input_var (tf.Tensor): Place holder for entire time-series inputs,
with shape :math:`(N, T, S^*)`.
step_input_var (tf.Tensor): Place holder for step inputs, with shape
:math:`(N, S^*)`.
step_hidden_var (tf.Tensor): Place holder for step hidden state, with
shape :math:`(N, H)`.
step_cell_var (tf.Tensor): Place holder for cell state, with shape
:math:`(N, H)`.
output_nonlinearity_layer (callable): Activation function for output
dense layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
hidden_state_init (callable): Initializer function for the
        initial hidden state. The function should return a tf.Tensor.
hidden_state_init_trainable (bool): Bool for whether the initial
hidden state is trainable.
cell_state_init (callable): Initializer function for the
        initial cell state. The function should return a tf.Tensor.
cell_state_init_trainable (bool): Bool for whether the initial
cell state is trainable.
Return:
    tf.Tensor: Entire time-series outputs, with shape :math:`(N, T, S^*)`.
tf.Tensor: Step output, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, H)`.
tf.Tensor: Step cell state, with shape :math:`(N, H)`.
tf.Tensor: Initial hidden state, with shape :math:`(H, )`.
tf.Tensor: Initial cell state, with shape :math:`(H, )`.
|
lstm
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/lstm.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/lstm.py
|
MIT
|
def network_input_spec(self):
"""Network input spec.
Return:
        list[str]: List of key(str) for the network inputs.
"""
return [
'full_input', 'step_input', 'step_hidden_input', 'step_cell_input'
]
|
Network input spec.
Return:
        list[str]: List of key(str) for the network inputs.
|
network_input_spec
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/lstm_model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/lstm_model.py
|
MIT
|
def network_output_spec(self):
"""Network output spec.
Return:
list[str]: List of key(str) for the network outputs.
"""
return [
'all_output', 'step_output', 'step_hidden', 'step_cell',
'init_hidden', 'init_cell'
]
|
Network output spec.
Return:
list[str]: List of key(str) for the network outputs.
|
network_output_spec
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/lstm_model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/lstm_model.py
|
MIT
|
def _build(self,
all_input_var,
step_input_var,
step_hidden_var,
step_cell_var,
name=None):
"""Build model given input placeholder(s).
Args:
all_input_var (tf.Tensor): Place holder for entire time-series
inputs.
step_input_var (tf.Tensor): Place holder for step inputs.
step_hidden_var (tf.Tensor): Place holder for step hidden state.
step_cell_var (tf.Tensor): Place holder for step cell state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Entire time-series outputs.
tf.Tensor: Step output.
tf.Tensor: Step hidden state.
tf.Tensor: Step cell state.
tf.Tensor: Initial hidden state.
tf.Tensor: Initial cell state.
"""
del name
return lstm(
name='lstm',
lstm_cell=self._lstm_cell,
all_input_var=all_input_var,
step_input_var=step_input_var,
step_hidden_var=step_hidden_var,
step_cell_var=step_cell_var,
hidden_state_init=self._hidden_state_init,
hidden_state_init_trainable=self._hidden_state_init_trainable,
cell_state_init=self._cell_state_init,
cell_state_init_trainable=self._cell_state_init_trainable,
output_nonlinearity_layer=self._output_nonlinearity_layer)
|
Build model given input placeholder(s).
Args:
all_input_var (tf.Tensor): Place holder for entire time-series
inputs.
step_input_var (tf.Tensor): Place holder for step inputs.
step_hidden_var (tf.Tensor): Place holder for step hidden state.
step_cell_var (tf.Tensor): Place holder for step cell state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Entire time-series outputs.
tf.Tensor: Step output.
tf.Tensor: Step hidden state.
tf.Tensor: Step cell state.
tf.Tensor: Initial hidden state.
tf.Tensor: Initial cell state.
|
_build
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/lstm_model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/lstm_model.py
|
MIT
|
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_lstm_cell']
del new_dict['_output_nonlinearity_layer']
return new_dict
|
Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/lstm_model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/lstm_model.py
|
MIT
|
def mlp(input_var,
output_dim,
hidden_sizes,
name,
input_var2=None,
concat_layer=-2,
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
"""Multi-layer perceptron (MLP).
It maps real-valued inputs to real-valued outputs.
Args:
input_var (tf.Tensor): Input tf.Tensor to the MLP.
output_dim (int): Dimension of the network output.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means this MLP consists of two
hidden layers, each with 32 hidden units.
name (str): Network name, also the variable scope.
input_var2 (tf.Tensor): Second input tf.Tensor to the MLP if input
needs to be concatenated with a layer in the model.
    concat_layer (int): The index of the layer at which to concatenate
        input_var2 with the network. If input_var2 is not supplied, this
        argument is ignored. The indexing works like standard python list
        indexing: an index of 0 refers to the input layer (input_var),
        while an index of -1 points to the last hidden layer. The default
        value points to the second layer from the end. If the model has
        only one layer, input_var2 is concatenated with that layer.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
Return:
tf.Tensor: The output tf.Tensor of the MLP.
"""
n_layers = len(hidden_sizes) + 1
_merge_inputs = False
if input_var2 is not None:
_merge_inputs = True
if n_layers > 1:
_concat_layer = (concat_layer % n_layers + n_layers) % n_layers
else:
_concat_layer = 0
with tf.compat.v1.variable_scope(name):
l_hid = input_var
for idx, hidden_size in enumerate(hidden_sizes):
if _merge_inputs and idx == _concat_layer:
l_hid = tf.keras.layers.concatenate([l_hid, input_var2])
l_hid = tf.compat.v1.layers.dense(inputs=l_hid,
units=hidden_size,
activation=hidden_nonlinearity,
kernel_initializer=hidden_w_init,
bias_initializer=hidden_b_init,
name='hidden_{}'.format(idx))
if layer_normalization:
l_hid = tf.keras.layers.LayerNormalization()(l_hid)
if _merge_inputs and _concat_layer == len(hidden_sizes):
l_hid = tf.keras.layers.concatenate([l_hid, input_var2])
l_out = tf.compat.v1.layers.dense(inputs=l_hid,
units=output_dim,
activation=output_nonlinearity,
kernel_initializer=output_w_init,
bias_initializer=output_b_init,
name='output')
return l_out
|
Multi-layer perceptron (MLP).
It maps real-valued inputs to real-valued outputs.
Args:
input_var (tf.Tensor): Input tf.Tensor to the MLP.
output_dim (int): Dimension of the network output.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means this MLP consists of two
hidden layers, each with 32 hidden units.
name (str): Network name, also the variable scope.
input_var2 (tf.Tensor): Second input tf.Tensor to the MLP if input
needs to be concatenated with a layer in the model.
    concat_layer (int): The index of the layer at which to concatenate
        input_var2 with the network. If input_var2 is not supplied, this
        argument is ignored. The indexing works like standard python list
        indexing: an index of 0 refers to the input layer (input_var),
        while an index of -1 points to the last hidden layer. The default
        value points to the second layer from the end. If the model has
        only one layer, input_var2 is concatenated with that layer.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
Return:
tf.Tensor: The output tf.Tensor of the MLP.
|
mlp
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/mlp.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/mlp.py
|
MIT
|
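A short sketch of calling mlp() with a second input merged at the default concat_layer; the placeholder shapes and layer sizes are illustrative assumptions.

import tensorflow as tf
from garage.tf.models.mlp import mlp

tf.compat.v1.disable_eager_execution()
obs = tf.compat.v1.placeholder(tf.float32, shape=(None, 6))
act = tf.compat.v1.placeholder(tf.float32, shape=(None, 2))
# With hidden_sizes=(32, 32) and concat_layer=-2, act is concatenated
# right before the second hidden layer.
q_val = mlp(input_var=obs,
            output_dim=1,
            hidden_sizes=(32, 32),
            name='mlp_example',
            input_var2=act,
            concat_layer=-2)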
def _build(self, state_input, name=None):
"""Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
del name
action_out = mlp(input_var=state_input,
output_dim=self._output_dim,
hidden_sizes=self._hidden_sizes,
name='action_value',
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
state_out = mlp(input_var=state_input,
output_dim=1,
hidden_sizes=self._hidden_sizes,
name='state_value',
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
action_out_mean = tf.reduce_mean(action_out, 1)
        # calculate the advantage of performing a certain action
        # over other actions in a particular state
action_out_advantage = action_out - tf.expand_dims(action_out_mean, 1)
q_func_out = state_out + action_out_advantage
return q_func_out
|
Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
|
_build
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/mlp_dueling_model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/mlp_dueling_model.py
|
MIT
|
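The dueling head above splits the network into a state-value stream and an action-advantage stream and recombines them; a NumPy sketch of the same combination with made-up numbers:

import numpy as np

action_out = np.array([[1.0, 2.0, 3.0],
                       [0.5, 0.5, 2.0]])   # advantage stream, shape (N, A)
state_out = np.array([[10.0], [-1.0]])     # value stream, shape (N, 1)
advantage = action_out - action_out.mean(axis=1, keepdims=True)
q_values = state_out + advantage
print(q_values)  # [[ 9.  10.  11. ] [-1.5 -1.5  0. ]]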
def _build(self, state_input, action_input, name=None):
"""Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Tensor input for state.
action_input (tf.Tensor): Tensor input for action.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
del name
return mlp(input_var=state_input,
output_dim=self._output_dim,
hidden_sizes=self._hidden_sizes,
input_var2=action_input,
concat_layer=self._concat_layer,
name='mlp_concat',
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
|
Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Tensor input for state.
action_input (tf.Tensor): Tensor input for action.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
|
_build
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/mlp_merge_model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/mlp_merge_model.py
|
MIT
|
def _build(self, state_input, name=None):
"""Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
del name
return mlp(input_var=state_input,
output_dim=self._output_dim,
hidden_sizes=self._hidden_sizes,
name='mlp',
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
|
Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
|
_build
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/mlp_model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/mlp_model.py
|
MIT
|
def build(self, *inputs, name=None):
"""Output of model with the given input placeholder(s).
This function is implemented by subclasses to create their computation
graphs, which will be managed by Model. Generally, subclasses should
implement `build()` directly.
Args:
inputs (object): Input(s) for the model.
name (str): Name of the model.
Return:
list[tf.Tensor]: Output(s) of the model.
"""
|
Output of model with the given input placeholder(s).
This function is implemented by subclasses to create their computation
graphs, which will be managed by Model. Generally, subclasses should
implement `build()` directly.
Args:
inputs (object): Input(s) for the model.
name (str): Name of the model.
Return:
list[tf.Tensor]: Output(s) of the model.
|
build
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/model.py
|
MIT
|
def parameters(self):
"""Parameters of the Model.
        The output of a model is determined by its parameters. It could be
the weights of a neural network model or parameters of a loss
function model.
Returns:
list[tf.Tensor]: Parameters.
"""
|
Parameters of the Model.
The output of a model is determined by its parameters. It could be
the weights of a neural network model or parameters of a loss
function model.
Returns:
list[tf.Tensor]: Parameters.
|
parameters
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/model.py
|
MIT
|
def parameters(self, parameters):
"""Set parameters of the Model.
Args:
parameters (list[tf.Tensor]): Parameters.
"""
|
Set parameters of the Model.
Args:
parameters (list[tf.Tensor]): Parameters.
|
parameters
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/model.py
|
MIT
|
def build(self, *inputs, name=None):
"""Build a Network with the given input(s).
***
        Do not call tf.compat.v1.global_variables_initializer() after building a model
as it will reassign random weights to the model.
The parameters inside a model will be initialized when calling build().
***
It uses the same, fixed variable scope for all Networks, to ensure
        parameter sharing. Different Networks must have a unique name.
Args:
inputs (list[tf.Tensor]) : Tensor input(s), recommended to be
positional arguments, for example,
def build(self, state_input, action_input, name=None).
name (str): Name of the model, which is also the name scope of the
model.
Raises:
ValueError: When a Network with the same name is already built.
Returns:
list[tf.Tensor]: Output tensors of the model with the given
inputs.
"""
network_name = name or 'default'
if not self._networks:
# First time building the model, so self._networks are empty
# We store the variable_scope to reenter later when we reuse it
with tf.compat.v1.variable_scope(self._name) as vs:
self._variable_scope = vs
with tf.name_scope(name=network_name):
network = Network()
network._inputs = inputs
network._outputs = self._build(*inputs, name)
variables = self._get_variables().values()
tf.compat.v1.get_default_session().run(
tf.compat.v1.variables_initializer(variables))
if self._default_parameters:
self.parameters = self._default_parameters
else:
if network_name in self._networks:
raise ValueError(
'Network {} already exists!'.format(network_name))
with tf.compat.v1.variable_scope(self._variable_scope,
reuse=True,
auxiliary_name_scope=False):
with tf.name_scope(name=network_name):
network = Network()
network._inputs = inputs
network._outputs = self._build(*inputs, name)
custom_in_spec = self.network_input_spec()
custom_out_spec = self.network_output_spec()
in_spec = ['input', 'inputs']
out_spec = ['output', 'outputs']
in_args = [network.input, network.inputs]
out_args = [network.output, network.outputs]
if isinstance(network.inputs, tuple) and len(network.inputs) > 1:
assert len(custom_in_spec) == len(network.inputs), (
'network_input_spec must have same length as inputs!')
in_spec.extend(custom_in_spec)
in_args.extend(network.inputs)
if isinstance(network.outputs, tuple) and len(network.outputs) > 1:
assert len(custom_out_spec) == len(network.outputs), (
'network_output_spec must have same length as outputs!')
out_spec.extend(custom_out_spec)
out_args.extend(network.outputs)
elif len(custom_out_spec) > 0:
if not isinstance(network.outputs, tuple):
assert len(custom_out_spec) == 1, (
                    'network_output_spec must have same length as outputs!')
out_spec.extend(custom_out_spec)
out_args.extend([network.outputs])
else:
assert len(custom_out_spec) == len(network.outputs), (
                'network_output_spec must have same length as outputs!')
out_spec.extend(custom_out_spec)
out_args.extend(network.outputs)
c = namedtuple(network_name, [*in_spec, *out_spec])
all_args = in_args + out_args
out_network = c(*all_args)
self._networks[network_name] = out_network
return out_network
|
Build a Network with the given input(s).
***
Do not call tf.compat.v1.global_variables_initializer() after building a model
as it will reassign random weights to the model.
The parameters inside a model will be initialized when calling build().
***
It uses the same, fixed variable scope for all Networks, to ensure
parameter sharing. Different Networks must have a unique name.
Args:
inputs (list[tf.Tensor]) : Tensor input(s), recommended to be
positional arguments, for example,
def build(self, state_input, action_input, name=None).
name (str): Name of the model, which is also the name scope of the
model.
Raises:
ValueError: When a Network with the same name is already built.
Returns:
list[tf.Tensor]: Output tensors of the model with the given
inputs.
|
build
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/model.py
|
MIT
|
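A sketch of the two-phase behavior of build(): the first call creates and initializes the variables, later calls with a distinct name reuse them. The MLPModel constructor arguments here are assumptions made for illustration.

import tensorflow as tf
from garage.tf.models.mlp_model import MLPModel

tf.compat.v1.disable_eager_execution()
with tf.compat.v1.Session():
    model = MLPModel(output_dim=2, hidden_sizes=(8, 8))
    x1 = tf.compat.v1.placeholder(tf.float32, shape=(None, 4))
    x2 = tf.compat.v1.placeholder(tf.float32, shape=(None, 4))
    first = model.build(x1)                  # creates and initializes variables
    second = model.build(x2, name='reuse')   # shares the same parameters
    # first.outputs and second.outputs are distinct tensors backed by the
    # same weights; building another network named 'reuse' would raise.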
def _build(self, *inputs, name=None):
"""Build this model given input placeholder(s).
User should implement _build() inside their subclassed model,
and construct the computation graphs in this function.
Args:
            inputs: Tensor input(s), recommended to be positional arguments, e.g.
def _build(self, state_input, action_input, name=None).
                They are usually the same as the inputs in build().
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
list[tf.Tensor]: Tensor output(s) of the model.
"""
|
Build this model given input placeholder(s).
User should implement _build() inside their subclassed model,
and construct the computation graphs in this function.
Args:
    inputs: Tensor input(s), recommended to be positional arguments, e.g.
def _build(self, state_input, action_input, name=None).
        They are usually the same as the inputs in build().
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
list[tf.Tensor]: Tensor output(s) of the model.
|
_build
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/model.py
|
MIT
|
def parameters(self):
"""Parameters of the model.
Returns:
np.ndarray: Parameters
"""
_variables = self._get_variables()
if _variables:
return tf.compat.v1.get_default_session().run(_variables)
else:
return _variables
|
Parameters of the model.
Returns:
np.ndarray: Parameters
|
parameters
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/model.py
|
MIT
|
def parameters(self, parameters):
"""Set model parameters.
Args:
parameters (tf.Tensor): Parameters.
"""
variables = self._get_variables()
for name, var in variables.items():
found = False
# param name without model name
param_name = name[name.find(self.name) + len(self.name) + 1:]
for k, v in parameters.items():
if param_name in k:
var.load(v)
found = True
continue
if not found:
warnings.warn('No value provided for variable {}'.format(name))
|
Set model parameters.
Args:
parameters (tf.Tensor): Parameters.
|
parameters
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/model.py
|
MIT
|
def _get_variables(self):
"""Get variables of this model.
Returns:
dict[str: tf.Tensor]: Variables of this model.
"""
if self._variable_scope:
return {v.name: v for v in self._variable_scope.global_variables()}
else:
return dict()
|
Get variables of this model.
Returns:
dict[str: tf.Tensor]: Variables of this model.
|
_get_variables
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/model.py
|
MIT
|
def __getstate__(self):
"""Get the pickle state.
Returns:
dict: The pickled state.
"""
new_dict = super().__getstate__()
del new_dict['_networks']
new_dict['_default_parameters'] = self.parameters
return new_dict
|
Get the pickle state.
Returns:
dict: The pickled state.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/model.py
|
MIT
|
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): unpickled state.
"""
super().__setstate__(state)
self._networks = {}
|
Object.__setstate__.
Args:
state (dict): unpickled state.
|
__setstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/model.py
|
MIT
|
def reset(self, do_resets=None):
"""Reset the module.
        This is effective only for recurrent modules. do_resets is effective
        only for vectorized modules.
        For vectorized modules, do_resets is an array of booleans indicating
        which internal states to reset. The length of do_resets should be
equal to the length of inputs.
Args:
do_resets (numpy.ndarray): Bool array indicating which states
to be reset.
"""
|
Reset the module.
This is effective only for recurrent modules. do_resets is effective
only for vectorized modules.
For vectorized modules, do_resets is an array of booleans indicating
which internal states to reset. The length of do_resets should be
equal to the length of inputs.
Args:
do_resets (numpy.ndarray): Bool array indicating which states
to be reset.
|
reset
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/module.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/module.py
|
MIT
|
def get_regularizable_vars(self):
"""Get all network weight variables in the current scope.
Returns:
List[tf.Variable]: A list of network weight variables in the
current variable scope.
"""
trainable = self._variable_scope.global_variables()
return [
var for var in trainable
if 'hidden' in var.name and 'kernel' in var.name
]
|
Get all network weight variables in the current scope.
Returns:
List[tf.Variable]: A list of network weight variables in the
current variable scope.
|
get_regularizable_vars
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/module.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/module.py
|
MIT
|
def get_params(self):
"""Get the trainable variables.
Returns:
List[tf.Variable]: A list of trainable variables in the current
variable scope.
"""
if self._cached_params is None:
self._cached_params = self.get_trainable_vars()
return self._cached_params
|
Get the trainable variables.
Returns:
List[tf.Variable]: A list of trainable variables in the current
variable scope.
|
get_params
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/module.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/module.py
|
MIT
|
def get_param_shapes(self):
"""Get parameter shapes.
Returns:
List[tuple]: A list of variable shapes.
"""
if self._cached_param_shapes is None:
params = self.get_params()
param_values = tf.compat.v1.get_default_session().run(params)
self._cached_param_shapes = [val.shape for val in param_values]
return self._cached_param_shapes
|
Get parameter shapes.
Returns:
List[tuple]: A list of variable shapes.
|
get_param_shapes
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/module.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/module.py
|
MIT
|
def get_param_values(self):
"""Get param values.
Returns:
np.ndarray: Values of the parameters evaluated in
the current session
"""
params = self.get_params()
param_values = tf.compat.v1.get_default_session().run(params)
return flatten_tensors(param_values)
|
Get param values.
Returns:
np.ndarray: Values of the parameters evaluated in
the current session
|
get_param_values
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/module.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/module.py
|
MIT
|
def set_param_values(self, param_values):
"""Set param values.
Args:
param_values (np.ndarray): A numpy array of parameter values.
"""
param_values = unflatten_tensors(param_values, self.get_param_shapes())
for param, value in zip(self.get_params(), param_values):
param.load(value)
|
Set param values.
Args:
param_values (np.ndarray): A numpy array of parameter values.
|
set_param_values
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/module.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/module.py
|
MIT
|
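set_param_values() expects one flat vector that is split back into the per-variable shapes from get_param_shapes(); a plain NumPy sketch of that flatten/unflatten round trip follows (garage's flatten_tensors/unflatten_tensors helpers are assumed to behave like this).

import numpy as np

params = [np.arange(6.0).reshape(2, 3), np.array([7.0, 8.0])]
shapes = [p.shape for p in params]
flat = np.concatenate([p.reshape(-1) for p in params])        # shape (8,)
sizes = [int(np.prod(s)) for s in shapes]
splits = np.split(flat, np.cumsum(sizes)[:-1])                # undo the flatten
restored = [v.reshape(s) for v, s in zip(splits, shapes)]
assert all((a == b).all() for a, b in zip(params, restored))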
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = self.__dict__.copy()
del new_dict['_cached_params']
return new_dict
|
Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/module.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/module.py
|
MIT
|
def _build(self, state_input, name=None):
"""Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
with tf.compat.v1.variable_scope('normalized_vars'):
x_mean_var = tf.compat.v1.get_variable(
name='x_mean',
shape=(1, ) + self._input_shape,
dtype=np.float32,
initializer=tf.zeros_initializer(),
trainable=False)
x_std_var = tf.compat.v1.get_variable(
name='x_std_var',
shape=(1, ) + self._input_shape,
dtype=np.float32,
initializer=tf.ones_initializer(),
trainable=False)
normalized_xs_var = (state_input - x_mean_var) / x_std_var
y_hat = super()._build(normalized_xs_var)
return y_hat, x_mean_var, x_std_var
|
Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
|
_build
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/normalized_input_mlp_model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/normalized_input_mlp_model.py
|
MIT
|
def parameter(input_var,
length,
initializer=tf.zeros_initializer(),
dtype=tf.float32,
trainable=True,
name='parameter'):
"""Parameter layer.
    Used as a layer that can be broadcast to a certain shape to
    match with the input variable during training.
    For recurrent usage, use garage.tf.models.recurrent_parameter().
    Example: A trainable parameter variable with shape (2,) needs to be
    broadcast to (32, 2) when applied to a batch with size 32.
Args:
input_var (tf.Tensor): Input tf.Tensor.
length (int): Integer dimension of the variable.
initializer (callable): Initializer of the variable. The function
should return a tf.Tensor.
dtype: Data type of the variable (default is tf.float32).
trainable (bool): Whether the variable is trainable.
name (str): Variable scope of the variable.
Return:
A tensor of the broadcasted variables.
"""
with tf.compat.v1.variable_scope(name):
p = tf.compat.v1.get_variable('parameter',
shape=(length, ),
dtype=dtype,
initializer=initializer,
trainable=trainable)
batch_dim = tf.shape(input_var)[0]
broadcast_shape = tf.concat(axis=0, values=[[batch_dim], [length]])
p_broadcast = tf.broadcast_to(p, shape=broadcast_shape)
return p_broadcast
|
Parameter layer.
    Used as a layer that can be broadcast to a certain shape to
    match with the input variable during training.
    For recurrent usage, use garage.tf.models.recurrent_parameter().
    Example: A trainable parameter variable with shape (2,) needs to be
    broadcast to (32, 2) when applied to a batch with size 32.
Args:
input_var (tf.Tensor): Input tf.Tensor.
length (int): Integer dimension of the variable.
initializer (callable): Initializer of the variable. The function
should return a tf.Tensor.
dtype: Data type of the variable (default is tf.float32).
trainable (bool): Whether the variable is trainable.
name (str): Variable scope of the variable.
Return:
A tensor of the broadcasted variables.
|
parameter
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/parameter.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/parameter.py
|
MIT
|
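A short sketch of parameter() as a broadcast-to-batch variable, e.g. a state-independent log-std head; the placeholder shape, length and initializer are illustrative assumptions.

import tensorflow as tf
from garage.tf.models.parameter import parameter

tf.compat.v1.disable_eager_execution()
obs = tf.compat.v1.placeholder(tf.float32, shape=(None, 3))
# One (2,)-shaped trainable variable, broadcast across the batch dimension.
log_std = parameter(input_var=obs,
                    length=2,
                    initializer=tf.constant_initializer(0.5),
                    name='log_std')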
def recurrent_parameter(input_var,
step_input_var,
length,
initializer=tf.zeros_initializer(),
dtype=tf.float32,
trainable=True,
name='recurrent_parameter'):
"""Parameter layer for recurrent networks.
    Used as a layer that can be broadcast to a certain shape to
    match with the input variable during training.
    Example: A trainable parameter variable with shape (2,) needs to be
    broadcast to (32, 4, 2) when applied to a batch with size 32 and
    time-length 4.
Args:
input_var (tf.Tensor): Input tf.Tensor for full time-series inputs.
step_input_var (tf.Tensor): Input tf.Tensor for step inputs.
length (int): Integer dimension of the variable.
initializer (callable): Initializer of the variable. The function
should return a tf.Tensor.
dtype: Data type of the variable (default is tf.float32).
trainable (bool): Whether the variable is trainable.
name (str): Variable scope of the variable.
Return:
A tensor of the two broadcasted variables: one for full time-series
inputs, one for step inputs.
"""
with tf.compat.v1.variable_scope(name):
p = tf.compat.v1.get_variable('parameter',
shape=(length, ),
dtype=dtype,
initializer=initializer,
trainable=trainable)
batch_dim = tf.shape(input_var)[:2]
step_batch_dim = tf.shape(step_input_var)[:1]
broadcast_shape = tf.concat(axis=0, values=[batch_dim, [length]])
step_broadcast_shape = tf.concat(axis=0,
values=[step_batch_dim, [length]])
p_broadcast = tf.broadcast_to(p, shape=broadcast_shape)
step_p_broadcast = tf.broadcast_to(p, shape=step_broadcast_shape)
return p_broadcast, step_p_broadcast
|
Parameter layer for recurrent networks.
    Used as a layer that can be broadcast to a certain shape to
    match with the input variable during training.
    Example: A trainable parameter variable with shape (2,) needs to be
    broadcast to (32, 4, 2) when applied to a batch with size 32 and
    time-length 4.
Args:
input_var (tf.Tensor): Input tf.Tensor for full time-series inputs.
step_input_var (tf.Tensor): Input tf.Tensor for step inputs.
length (int): Integer dimension of the variable.
initializer (callable): Initializer of the variable. The function
should return a tf.Tensor.
dtype: Data type of the variable (default is tf.float32).
trainable (bool): Whether the variable is trainable.
name (str): Variable scope of the variable.
Return:
A tensor of the two broadcasted variables: one for full time-series
inputs, one for step inputs.
|
recurrent_parameter
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/parameter.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/parameter.py
|
MIT
|
def _build(self, input_var, name=None):
"""Build model given input placeholder(s).
Args:
input_var (tf.Tensor): Tensor input.
name (str): Inner model name, also the variable scope of the
inner model.
Return:
tf.Tensor: Tensor output of the model.
"""
out = input_var
for model in self._models:
self._last_network = model.build(out, name=name)
if self._first_network is None:
self._first_network = self._last_network
out = self._last_network.outputs
return out
|
Build model given input placeholder(s).
Args:
input_var (tf.Tensor): Tensor input.
name (str): Inner model name, also the variable scope of the
inner model.
Return:
tf.Tensor: Tensor output of the model.
|
_build
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/sequential.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/sequential.py
|
MIT
|
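A sketch of chaining two models so the output of the first feeds the second. The Sequential(*models) constructor and the MLPModel arguments are assumptions for illustration.

import tensorflow as tf
from garage.tf.models.mlp_model import MLPModel
from garage.tf.models.sequential import Sequential

tf.compat.v1.disable_eager_execution()
with tf.compat.v1.Session():
    seq = Sequential(MLPModel(output_dim=8, hidden_sizes=(16,), name='encoder'),
                     MLPModel(output_dim=2, hidden_sizes=(16,), name='head'))
    x = tf.compat.v1.placeholder(tf.float32, shape=(None, 4))
    out = seq.build(x).outputs   # output of 'head' applied to the output of 'encoder'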
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_first_network']
del new_dict['_last_network']
return new_dict
|
Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/sequential.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/sequential.py
|
MIT
|
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
super().__setstate__(state)
self._first_network = None
self._last_network = None
|
Object.__setstate__.
Args:
state (dict): Unpickled state.
|
__setstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/models/sequential.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/models/sequential.py
|
MIT
|
def update_hvp(self, f, target, inputs, reg_coeff, name=None):
"""Build the symbolic graph to compute the Hessian-vector product.
Args:
f (tf.Tensor): The function whose Hessian needs to be computed.
target (garage.tf.policies.Policy): A parameterized object to
optimize over.
inputs (tuple[tf.Tensor]): The inputs for function f.
reg_coeff (float): A small value so that A -> A + reg*I.
name (str): Name to be used in tf.name_scope.
"""
|
Build the symbolic graph to compute the Hessian-vector product.
Args:
f (tf.Tensor): The function whose Hessian needs to be computed.
target (garage.tf.policies.Policy): A parameterized object to
optimize over.
inputs (tuple[tf.Tensor]): The inputs for function f.
reg_coeff (float): A small value so that A -> A + reg*I.
name (str): Name to be used in tf.name_scope.
|
update_hvp
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
MIT
|
def build_eval(self, inputs):
"""Build the evaluation function. # noqa: D202, E501 # https://github.com/PyCQA/pydocstyle/pull/395.
Args:
inputs (tuple[numpy.ndarray]): Function f will be evaluated on
these inputs.
Returns:
function: It can be called to get the final result.
"""
def _eval(v):
"""The evaluation function.
Args:
v (numpy.ndarray): The vector to be multiplied with Hessian.
Returns:
numpy.ndarray: The product of Hessian of function f and v.
"""
xs = tuple(self._target.flat_to_params(v))
ret = _sliced_fn(self._hvp_fun['f_hx_plain'], self._num_slices)(
inputs, xs) + self._reg_coeff * v
return ret
return _eval
|
Build the evaluation function. # noqa: D202, E501 # https://github.com/PyCQA/pydocstyle/pull/395.
Args:
inputs (tuple[numpy.ndarray]): Function f will be evaluated on
these inputs.
Returns:
function: It can be called to get the final result.
|
build_eval
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
MIT
|
def _eval(v):
"""The evaluation function.
Args:
v (numpy.ndarray): The vector to be multiplied with Hessian.
Returns:
numpy.ndarray: The product of Hessian of function f and v.
"""
xs = tuple(self._target.flat_to_params(v))
ret = _sliced_fn(self._hvp_fun['f_hx_plain'], self._num_slices)(
inputs, xs) + self._reg_coeff * v
return ret
|
The evaluation function.
Args:
v (numpy.ndarray): The vector to be multiplied with Hessian.
Returns:
numpy.ndarray: The product of Hessian of function f and v.
|
_eval
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
MIT
|
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = self.__dict__.copy()
del new_dict['_hvp_fun']
return new_dict
|
Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
MIT
|
def update_hvp(self, f, target, inputs, reg_coeff, name='PearlmutterHVP'):
"""Build the symbolic graph to compute the Hessian-vector product.
Args:
f (tf.Tensor): The function whose Hessian needs to be computed.
target (garage.tf.policies.Policy): A parameterized object to
optimize over.
inputs (tuple[tf.Tensor]): The inputs for function f.
reg_coeff (float): A small value so that A -> A + reg*I.
name (str): Name to be used in tf.name_scope.
"""
self._target = target
self._reg_coeff = reg_coeff
params = target.get_params()
with tf.name_scope(name):
constraint_grads = tf.gradients(f,
xs=params,
name='gradients_constraint')
for idx, (grad, param) in enumerate(zip(constraint_grads, params)):
if grad is None:
constraint_grads[idx] = tf.zeros_like(param)
xs = tuple(
[new_tensor_like(p.name.split(':')[0], p) for p in params])
def hx_plain():
"""Computes product of Hessian(f) and vector v.
Returns:
tf.Tensor: Symbolic result.
"""
with tf.name_scope('hx_plain'):
with tf.name_scope('hx_function'):
hx_f = tf.reduce_sum(
tf.stack([
tf.reduce_sum(g * x)
for g, x in zip(constraint_grads, xs)
]))
hx_plain_splits = tf.gradients(hx_f,
params,
name='gradients_hx_plain')
for idx, (hx,
param) in enumerate(zip(hx_plain_splits,
params)):
if hx is None:
hx_plain_splits[idx] = tf.zeros_like(param)
return flatten_tensor_variables(hx_plain_splits)
self._hvp_fun = LazyDict(f_hx_plain=lambda: compile_function(
inputs=inputs + xs,
outputs=hx_plain(),
), )
|
Build the symbolic graph to compute the Hessian-vector product.
Args:
f (tf.Tensor): The function whose Hessian needs to be computed.
target (garage.tf.policies.Policy): A parameterized object to
optimize over.
inputs (tuple[tf.Tensor]): The inputs for function f.
reg_coeff (float): A small value so that A -> A + reg*I.
name (str): Name to be used in tf.name_scope.
|
update_hvp
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
MIT
|
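update_hvp() above uses the Pearlmutter trick: the Hessian-vector product H v equals the gradient of the scalar (grad f . v). A self-contained check of that identity with plain tf.gradients on a made-up quadratic:

import numpy as np
import tensorflow as tf

tf.compat.v1.disable_eager_execution()
x = tf.compat.v1.placeholder(tf.float32, shape=(2,))
v = tf.constant([1.0, -2.0])
f = 0.5 * tf.reduce_sum(x * x) + x[0] * x[1]   # Hessian is [[1, 1], [1, 1]]
grad = tf.gradients(f, x)[0]
gv = tf.reduce_sum(grad * v)                   # scalar grad . v
hv = tf.gradients(gv, x)[0]                    # Pearlmutter: Hessian-vector product
with tf.compat.v1.Session() as sess:
    print(sess.run(hv, feed_dict={x: np.array([0.3, 0.7], np.float32)}))
    # -> [-1. -1.], i.e. [[1, 1], [1, 1]] @ [1, -2]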
def hx_plain():
"""Computes product of Hessian(f) and vector v.
Returns:
tf.Tensor: Symbolic result.
"""
with tf.name_scope('hx_plain'):
with tf.name_scope('hx_function'):
hx_f = tf.reduce_sum(
tf.stack([
tf.reduce_sum(g * x)
for g, x in zip(constraint_grads, xs)
]))
hx_plain_splits = tf.gradients(hx_f,
params,
name='gradients_hx_plain')
for idx, (hx,
param) in enumerate(zip(hx_plain_splits,
params)):
if hx is None:
hx_plain_splits[idx] = tf.zeros_like(param)
return flatten_tensor_variables(hx_plain_splits)
|
Computes product of Hessian(f) and vector v.
Returns:
tf.Tensor: Symbolic result.
|
hx_plain
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
MIT
|
def update_hvp(self,
f,
target,
inputs,
reg_coeff,
name='FiniteDifferenceHVP'):
"""Build the symbolic graph to compute the Hessian-vector product.
Args:
f (tf.Tensor): The function whose Hessian needs to be computed.
target (garage.tf.policies.Policy): A parameterized object to
optimize over.
inputs (tuple[tf.Tensor]): The inputs for function f.
reg_coeff (float): A small value so that A -> A + reg*I.
name (str): Name to be used in tf.name_scope.
"""
self._target = target
self._reg_coeff = reg_coeff
params = target.get_params()
with tf.name_scope(name):
constraint_grads = tf.gradients(f,
xs=params,
name='gradients_constraint')
for idx, (grad, param) in enumerate(zip(constraint_grads, params)):
if grad is None:
constraint_grads[idx] = tf.zeros_like(param)
flat_grad = flatten_tensor_variables(constraint_grads)
def f_hx_plain(*args):
"""Computes product of Hessian(f) and vector v.
Args:
args (tuple[numpy.ndarray]): Contains inputs of function f
, and vector v.
Returns:
tf.Tensor: Symbolic result.
"""
with tf.name_scope('f_hx_plain'):
inputs_ = args[:len(inputs)]
xs = args[len(inputs):]
flat_xs = np.concatenate(
[np.reshape(x, (-1, )) for x in xs])
param_val = self._target.get_param_values()
eps = np.cast['float32'](
self.base_eps / (np.linalg.norm(param_val) + 1e-8))
self._target.set_param_values(param_val + eps * flat_xs)
flat_grad_dvplus = self._hvp_fun['f_grad'](*inputs_)
self._target.set_param_values(param_val)
if self.symmetric:
self._target.set_param_values(param_val -
eps * flat_xs)
flat_grad_dvminus = self._hvp_fun['f_grad'](*inputs_)
hx = (flat_grad_dvplus - flat_grad_dvminus) / (2 * eps)
self._target.set_param_values(param_val)
else:
flat_grad = self._hvp_fun['f_grad'](*inputs_)
hx = (flat_grad_dvplus - flat_grad) / eps
return hx
self._hvp_fun = LazyDict(
f_grad=lambda: compile_function(
inputs=inputs,
outputs=flat_grad,
),
f_hx_plain=lambda: f_hx_plain,
)
|
Build the symbolic graph to compute the Hessian-vector product.
Args:
f (tf.Tensor): The function whose Hessian needs to be computed.
target (garage.tf.policies.Policy): A parameterized object to
optimize over.
inputs (tuple[tf.Tensor]): The inputs for function f.
reg_coeff (float): A small value so that A -> A + reg*I.
name (str): Name to be used in tf.name_scope.
|
update_hvp
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
MIT
|
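f_hx_plain() above approximates H v by differencing gradients at perturbed parameters; a plain NumPy check of the symmetric (central) difference on a made-up quadratic, where the exact product is A @ v:

import numpy as np

A = np.array([[2.0, 0.3],
              [0.3, 1.0]])                 # symmetric, so grad f(theta) = A @ theta
grad = lambda theta: A @ theta             # gradient of f(theta) = 0.5 theta^T A theta
theta = np.array([0.5, -0.2])
v = np.array([1.0, 2.0])
eps = 1e-5
hv_fd = (grad(theta + eps * v) - grad(theta - eps * v)) / (2 * eps)
print(np.allclose(hv_fd, A @ v))           # True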
def f_hx_plain(*args):
"""Computes product of Hessian(f) and vector v.
Args:
args (tuple[numpy.ndarray]): Contains inputs of function f
, and vector v.
Returns:
tf.Tensor: Symbolic result.
"""
with tf.name_scope('f_hx_plain'):
inputs_ = args[:len(inputs)]
xs = args[len(inputs):]
flat_xs = np.concatenate(
[np.reshape(x, (-1, )) for x in xs])
param_val = self._target.get_param_values()
eps = np.cast['float32'](
self.base_eps / (np.linalg.norm(param_val) + 1e-8))
self._target.set_param_values(param_val + eps * flat_xs)
flat_grad_dvplus = self._hvp_fun['f_grad'](*inputs_)
self._target.set_param_values(param_val)
if self.symmetric:
self._target.set_param_values(param_val -
eps * flat_xs)
flat_grad_dvminus = self._hvp_fun['f_grad'](*inputs_)
hx = (flat_grad_dvplus - flat_grad_dvminus) / (2 * eps)
self._target.set_param_values(param_val)
else:
flat_grad = self._hvp_fun['f_grad'](*inputs_)
hx = (flat_grad_dvplus - flat_grad) / eps
return hx
|
Computes product of Hessian(f) and vector v.
Args:
args (tuple[numpy.ndarray]): Contains inputs of function f
, and vector v.
Returns:
tf.Tensor: Symbolic result.
|
f_hx_plain
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
MIT
|
def update_opt(
self,
loss,
target,
leq_constraint,
inputs,
extra_inputs=None,
name='ConjugateGradientOptimizer',
constraint_name='constraint',
):
"""Update the optimizer.
Build the functions for computing loss, gradient, and
the constraint value.
Args:
loss (tf.Tensor): Symbolic expression for the loss function.
target (garage.tf.policies.Policy): A parameterized object to
optimize over.
leq_constraint (tuple[tf.Tensor, float]): A constraint provided
as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
            inputs (list[tf.Tensor]): A list of symbolic variables as inputs,
which could be subsampled if needed. It is assumed that the
first dimension of these inputs should correspond to the
number of data points.
            extra_inputs (list[tf.Tensor]): A list of symbolic variables as
extra inputs which should not be subsampled.
name (str): Name to be passed to tf.name_scope.
            constraint_name (str): A constraint name for purposes of logging
and variable names.
"""
params = target.get_params()
with tf.name_scope(name):
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
else:
extra_inputs = tuple(extra_inputs)
constraint_term, constraint_value = leq_constraint
with tf.name_scope('loss_gradients'):
grads = tf.gradients(loss, xs=params)
for idx, (grad, param) in enumerate(zip(grads, params)):
if grad is None:
grads[idx] = tf.zeros_like(param)
flat_grad = flatten_tensor_variables(grads)
self._hvp_approach.update_hvp(f=constraint_term,
target=target,
inputs=inputs + extra_inputs,
reg_coeff=self._reg_coeff,
name='update_opt_' + constraint_name)
self._target = target
self._max_constraint_val = constraint_value
self._constraint_name = constraint_name
self._opt_fun = LazyDict(
f_loss=lambda: compile_function(
inputs=inputs + extra_inputs,
outputs=loss,
),
f_grad=lambda: compile_function(
inputs=inputs + extra_inputs,
outputs=flat_grad,
),
f_constraint=lambda: compile_function(
inputs=inputs + extra_inputs,
outputs=constraint_term,
),
f_loss_constraint=lambda: compile_function(
inputs=inputs + extra_inputs,
outputs=[loss, constraint_term],
),
)
|
Update the optimizer.
Build the functions for computing loss, gradient, and
the constraint value.
Args:
loss (tf.Tensor): Symbolic expression for the loss function.
target (garage.tf.policies.Policy): A parameterized object to
optimize over.
leq_constraint (tuple[tf.Tensor, float]): A constraint provided
as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
    inputs (list[tf.Tensor]): A list of symbolic variables as inputs,
which could be subsampled if needed. It is assumed that the
first dimension of these inputs should correspond to the
number of data points.
    extra_inputs (list[tf.Tensor]): A list of symbolic variables as
extra inputs which should not be subsampled.
name (str): Name to be passed to tf.name_scope.
    constraint_name (str): A constraint name for purposes of logging
and variable names.
|
update_opt
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
MIT
|
def loss(self, inputs, extra_inputs=None):
"""Compute the loss value.
Args:
            inputs (list[numpy.ndarray]): A list of inputs, which could be
                subsampled if needed. It is assumed that the first dimension
                of these inputs should correspond to the number of data points.
extra_inputs (list[numpy.ndarray]): A list of extra inputs which
should not be subsampled.
Returns:
float: Loss value.
"""
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
return _sliced_fn(self._opt_fun['f_loss'],
self._num_slices)(inputs, extra_inputs)
|
Compute the loss value.
Args:
    inputs (list[numpy.ndarray]): A list of inputs, which could be
        subsampled if needed. It is assumed that the first dimension
        of these inputs should correspond to the number of data points.
extra_inputs (list[numpy.ndarray]): A list of extra inputs which
should not be subsampled.
Returns:
float: Loss value.
|
loss
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
MIT
|
def constraint_val(self, inputs, extra_inputs=None):
"""Constraint value.
Args:
            inputs (list[numpy.ndarray]): A list of inputs, which could be
                subsampled if needed. It is assumed that the first dimension
                of these inputs should correspond to the number of data points.
extra_inputs (list[numpy.ndarray]): A list of extra inputs which
should not be subsampled.
Returns:
float: Constraint value.
"""
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
return _sliced_fn(self._opt_fun['f_constraint'],
self._num_slices)(inputs, extra_inputs)
|
Constraint value.
Args:
    inputs (list[numpy.ndarray]): A list of inputs, which could be
        subsampled if needed. It is assumed that the first dimension
        of these inputs should correspond to the number of data points.
extra_inputs (list[numpy.ndarray]): A list of extra inputs which
should not be subsampled.
Returns:
float: Constraint value.
|
constraint_val
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
MIT
|
def optimize(self,
inputs,
extra_inputs=None,
subsample_grouped_inputs=None,
name='optimize'):
"""Optimize the function.
Args:
            inputs (list[numpy.ndarray]): A list of inputs, which could be
                subsampled if needed. It is assumed that the first dimension
                of these inputs should correspond to the number of data points.
extra_inputs (list[numpy.ndarray]): A list of extra inputs which
should not be subsampled.
subsample_grouped_inputs (list[numpy.ndarray]): Subsampled inputs
to be used when subsample_factor is less than one.
name (str): The name argument for tf.name_scope.
"""
with tf.name_scope(name):
prev_param = np.copy(self._target.get_param_values())
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
subsample_inputs = inputs
if self._subsample_factor < 1:
if subsample_grouped_inputs is None:
subsample_grouped_inputs = [inputs]
subsample_inputs = tuple()
for inputs_grouped in subsample_grouped_inputs:
n_samples = len(inputs_grouped[0])
inds = np.random.choice(n_samples,
int(n_samples *
self._subsample_factor),
replace=False)
subsample_inputs += tuple(
[x[inds] for x in inputs_grouped])
logger.log(
('Start CG optimization: '
'#parameters: %d, #inputs: %d, #subsample_inputs: %d') %
(len(prev_param), len(inputs[0]), len(subsample_inputs[0])))
logger.log('computing loss before')
loss_before = _sliced_fn(self._opt_fun['f_loss'],
self._num_slices)(inputs, extra_inputs)
logger.log('computing gradient')
flat_g = _sliced_fn(self._opt_fun['f_grad'],
self._num_slices)(inputs, extra_inputs)
logger.log('gradient computed')
logger.log('computing descent direction')
hx = self._hvp_approach.build_eval(subsample_inputs + extra_inputs)
descent_direction = _cg(hx, flat_g, cg_iters=self._cg_iters)
initial_step_size = np.sqrt(
2.0 * self._max_constraint_val *
(1. / (descent_direction.dot(hx(descent_direction)) + 1e-8)))
if np.isnan(initial_step_size):
initial_step_size = 1.
flat_descent_step = initial_step_size * descent_direction
logger.log('descent direction computed')
n_iter = 0
for n_iter, ratio in enumerate(self._backtrack_ratio**np.arange(
self._max_backtracks)): # yapf: disable
cur_step = ratio * flat_descent_step
cur_param = prev_param - cur_step
self._target.set_param_values(cur_param)
loss, constraint_val = _sliced_fn(
self._opt_fun['f_loss_constraint'],
self._num_slices)(inputs, extra_inputs)
if (loss < loss_before
and constraint_val <= self._max_constraint_val):
break
if (np.isnan(loss) or np.isnan(constraint_val)
or loss >= loss_before or constraint_val >=
self._max_constraint_val) and not self._accept_violation:
logger.log(
'Line search condition violated. Rejecting the step!')
if np.isnan(loss):
logger.log('Violated because loss is NaN')
if np.isnan(constraint_val):
logger.log('Violated because constraint %s is NaN' %
self._constraint_name)
if loss >= loss_before:
logger.log('Violated because loss not improving')
if constraint_val >= self._max_constraint_val:
logger.log('Violated because constraint %s is violated' %
self._constraint_name)
self._target.set_param_values(prev_param)
logger.log('backtrack iters: %d' % n_iter)
logger.log('optimization finished')
|
Optimize the function.
Args:
inputs (list[numpy.ndarray]): A list of inputs, which could be
subsampled if needed. It is assumed that the first dimension
of these inputs corresponds to the number of data points.
extra_inputs (list[numpy.ndarray]): A list of extra inputs which
should not be subsampled.
subsample_grouped_inputs (list[numpy.ndarray]): Grouped inputs to
subsample from when subsample_factor is less than one.
name (str): The name argument for tf.name_scope.
|
optimize
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
MIT
|
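The optimize() method above combines a conjugate-gradient solve for the descent direction, a trust-region initial step size, and a backtracking line search. A minimal numpy sketch of just the step-size and backtracking arithmetic, assuming a hypothetical quadratic stand-in for the loss and KL constraint (none of the names below are the garage API):

import numpy as np

# Hypothetical quadratic model: loss(p) = 0.5 p^T A p - b^T p, KL ~ 0.5 d^T A d.
A = np.array([[3.0, 0.5], [0.5, 2.0]])
b = np.array([1.0, -1.0])
x = np.zeros(2)
max_constraint_val = 0.01                 # trust-region size, e.g. max KL
flat_g = A @ x - b                        # gradient at the current parameters
hx = lambda v: A @ v                      # Hessian-vector product
descent = np.linalg.solve(A, flat_g)      # stand-in for the CG solve above

# Initial step size: sqrt(2 * delta / (s^T H s)), as computed in optimize().
step_size = np.sqrt(2.0 * max_constraint_val /
                    (descent.dot(hx(descent)) + 1e-8))
loss = lambda p: 0.5 * p @ A @ p - b @ p
kl = lambda p: 0.5 * (p - x) @ A @ (p - x)

loss_before = loss(x)
for ratio in 0.8 ** np.arange(15):        # backtrack_ratio ** arange(max_backtracks)
    cur = x - ratio * step_size * descent
    if loss(cur) < loss_before and kl(cur) <= max_constraint_val:
        x = cur
        break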
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = self.__dict__.copy()
del new_dict['_opt_fun']
return new_dict
|
Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
MIT
|
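__getstate__ drops the lazily compiled TensorFlow functions because they cannot be pickled; they are rebuilt after unpickling, e.g. when update_opt() is called again. A minimal sketch of the same pattern with a hypothetical class (not the garage API):

import pickle

class CachedThing:
    def __init__(self):
        self._opt_fun = lambda x: x + 1   # unpicklable compiled/lambda cache

    def __getstate__(self):
        state = self.__dict__.copy()
        del state['_opt_fun']             # drop the unpicklable member
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self._opt_fun = None              # to be rebuilt later

obj = pickle.loads(pickle.dumps(CachedThing()))
assert obj._opt_fun is None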
def _cg(f_Ax, b, cg_iters=10, residual_tol=1e-10):
"""Use Conjugate Gradient iteration to solve Ax = b. Demmel p 312.
Args:
f_Ax (function): A function to compute Hessian vector product.
b (numpy.ndarray): Right hand side of the equation to solve.
cg_iters (int): Number of iterations to run conjugate gradient
algorithm.
residual_tol (float): Tolerance for convergence.
Returns:
numpy.ndarray: Solution x* for equation Ax = b.
"""
p = b.copy()
r = b.copy()
x = np.zeros_like(b)
rdotr = r.dot(r)
for _ in range(cg_iters):
z = f_Ax(p)
v = rdotr / p.dot(z)
x += v * p
r -= v * z
newrdotr = r.dot(r)
mu = newrdotr / rdotr
p = r + mu * p
rdotr = newrdotr
if rdotr < residual_tol:
break
return x
|
Use Conjugate Gradient iteration to solve Ax = b. Demmel p 312.
Args:
f_Ax (function): A function to compute Hessian vector product.
b (numpy.ndarray): Right hand side of the equation to solve.
cg_iters (int): Number of iterations to run conjugate gradient
algorithm.
residual_tol (float): Tolerance for convergence.
Returns:
numpy.ndarray: Solution x* for equation Ax = b.
|
_cg
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
MIT
|
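A quick numpy check of _cg above on a small symmetric positive-definite system. The import path follows the file path listed in this record and is an assumption about what is importable:

import numpy as np
# Assumes the module-level helper is importable from the listed path.
from garage.tf.optimizers.conjugate_gradient_optimizer import _cg

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
x = _cg(lambda p: A @ p, b, cg_iters=10)
assert np.allclose(A @ x, b, atol=1e-6)   # agrees with np.linalg.solve(A, b)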
def _sliced_fn(f, n_slices):
"""Divide function f's inputs into several slices.
Evaluate f on those slices, and then average the results. It is useful when
there is not enough memory to process all the data at once.
Assume:
1. each of f's inputs is iterable and composed of multiple "samples"
2. outputs can be averaged over "samples"
Args:
f (Callable): Function to evaluate.
n_slices (int): Number of slices to evaluate over.
Returns:
Callable: Sliced version of f.
"""
def _sliced_f(sliced_inputs, non_sliced_inputs=None): # pylint: disable=missing-return-doc, missing-return-type-doc # noqa: E501
if non_sliced_inputs is None:
non_sliced_inputs = []
if isinstance(non_sliced_inputs, tuple):
non_sliced_inputs = list(non_sliced_inputs)
n_paths = len(sliced_inputs[0])
slice_size = max(1, n_paths // n_slices)
ret_vals = None
for start in range(0, n_paths, slice_size):
inputs_slice = [v[start:start + slice_size] for v in sliced_inputs]
slice_ret_vals = f(*(inputs_slice + non_sliced_inputs))
if not isinstance(slice_ret_vals, (tuple, list)):
slice_ret_vals_as_list = [slice_ret_vals]
else:
slice_ret_vals_as_list = slice_ret_vals
scaled_ret_vals = [
np.asarray(v) * len(inputs_slice[0])
for v in slice_ret_vals_as_list
]
if ret_vals is None:
ret_vals = scaled_ret_vals
else:
ret_vals = [x + y for x, y in zip(ret_vals, scaled_ret_vals)]
ret_vals = [v / n_paths for v in ret_vals]
if not isinstance(slice_ret_vals, (tuple, list)):
ret_vals = ret_vals[0]
elif isinstance(slice_ret_vals, tuple):
ret_vals = tuple(ret_vals)
return ret_vals
return _sliced_f
|
Divide function f's inputs into several slices.
Evaluate f on those slices, and then average the results. It is useful when
there is not enough memory to process all the data at once.
Assume:
1. each of f's inputs is iterable and composed of multiple "samples"
2. outputs can be averaged over "samples"
Args:
f (Callable): Function to evaluate.
n_slices (int): Number of slices to evaluate over.
Returns:
Callable: Sliced version of f.
|
_sliced_fn
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/conjugate_gradient_optimizer.py
|
MIT
|
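Because _sliced_fn rescales each slice's outputs by the slice length and then divides by the total sample count, the sliced evaluation of any per-sample average equals the full-batch result. A small numpy sketch; the import path is an assumption based on this record, and mean_fn is a hypothetical stand-in for a compiled loss function:

import numpy as np
from garage.tf.optimizers.conjugate_gradient_optimizer import _sliced_fn

xs = np.arange(10.0)
mean_fn = lambda x: np.mean(x)                 # averages over "samples"
sliced_mean = _sliced_fn(mean_fn, n_slices=4)
assert np.isclose(sliced_mean([xs]), mean_fn(xs))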
def update_opt(self, loss, target, inputs, extra_inputs=None, **kwargs):
"""Construct operation graph for the optimizer.
Args:
loss (tf.Tensor): Loss objective to minimize.
target (object): Target object to optimize. The object should
implement `get_params()` and `get_param_values()`.
inputs (list[tf.Tensor]): List of input placeholders.
extra_inputs (list[tf.Tensor]): List of extra input placeholders.
kwargs (dict): Extra unused keyword arguments. Some optimizers
have extra input, e.g. KL constraint.
"""
del kwargs
with tf.name_scope(self._name):
self._target = target
tf_optimizer = make_optimizer(self._tf_optimizer,
**self._learning_rate)
self._train_op = tf_optimizer.minimize(
loss, var_list=target.get_params())
if extra_inputs is None:
extra_inputs = list()
self._input_vars = inputs + extra_inputs
self._opt_fun = LazyDict(
f_loss=lambda: compile_function(inputs + extra_inputs, loss), )
|
Construct operation graph for the optimizer.
Args:
loss (tf.Tensor): Loss objective to minimize.
target (object): Target object to optimize. The object should
implement `get_params()` and `get_param_values()`.
inputs (list[tf.Tensor]): List of input placeholders.
extra_inputs (list[tf.Tensor]): List of extra input placeholders.
kwargs (dict): Extra unused keyword arguments. Some optimizers
have extra input, e.g. KL constraint.
|
update_opt
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/first_order_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/first_order_optimizer.py
|
MIT
|
def loss(self, inputs, extra_inputs=None):
"""The loss.
Args:
inputs (list[numpy.ndarray]): List of input values.
extra_inputs (list[numpy.ndarray]): List of extra input values.
Returns:
float: Loss.
Raises:
Exception: If loss function is None, i.e. not defined.
"""
if self._opt_fun is None:
raise Exception(
'Use update_opt() to setup the loss function first.')
if extra_inputs is None:
extra_inputs = tuple()
return self._opt_fun['f_loss'](*(tuple(inputs) + extra_inputs))
# pylint: disable=too-many-branches
|
The loss.
Args:
inputs (list[numpy.ndarray]): List of input values.
extra_inputs (list[numpy.ndarray]): List of extra input values.
Returns:
float: Loss.
Raises:
Exception: If loss function is None, i.e. not defined.
|
loss
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/first_order_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/first_order_optimizer.py
|
MIT
|
def optimize(self, inputs, extra_inputs=None, callback=None):
"""Perform optimization.
Args:
inputs (list[numpy.ndarray]): List of input values.
extra_inputs (list[numpy.ndarray]): List of extra input values.
callback (callable): Function to call during each epoch. Default
is None.
Raises:
NotImplementedError: If inputs are invalid.
Exception: If loss function is None, i.e. not defined.
"""
if not inputs:
# Assumes that we should always sample mini-batches
raise NotImplementedError('No inputs are fed to optimizer.')
if self._opt_fun is None:
raise Exception(
'Use update_opt() to setup the loss function first.')
f_loss = self._opt_fun['f_loss']
if extra_inputs is None:
extra_inputs = tuple()
last_loss = f_loss(*(tuple(inputs) + extra_inputs))
start_time = time.time()
dataset = BatchDataset(inputs,
self._batch_size,
extra_inputs=extra_inputs)
sess = tf.compat.v1.get_default_session()
for epoch in range(self._max_optimization_epochs):
if self._verbose:
logger.log('Epoch {}'.format(epoch))
with click.progressbar(length=len(inputs[0]),
label='Optimizing minibatches') as pbar:
for batch in dataset.iterate(update=True):
sess.run(self._train_op,
dict(list(zip(self._input_vars, batch))))
pbar.update(len(batch[0]))
new_loss = f_loss(*(tuple(inputs) + extra_inputs))
if self._verbose:
logger.log('Epoch: {} | Loss: {}'.format(epoch, new_loss))
if self._callback or callback:
elapsed = time.time() - start_time
callback_args = dict(
loss=new_loss,
params=self._target.get_param_values()
if self._target else None,
itr=epoch,
elapsed=elapsed,
)
if self._callback:
self._callback(callback_args)
if callback:
callback(**callback_args)
if abs(last_loss - new_loss) < self._tolerance:
break
last_loss = new_loss
|
Perform optimization.
Args:
inputs (list[numpy.ndarray]): List of input values.
extra_inputs (list[numpy.ndarray]): List of extra input values.
callback (callable): Function to call during each epoch. Default
is None.
Raises:
NotImplementedError: If inputs are invalid.
Exception: If loss function is None, i.e. not defined.
|
optimize
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/first_order_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/first_order_optimizer.py
|
MIT
|
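The optimize() method above runs epochs of mini-batch updates and stops early when the full-batch loss change falls below a tolerance. A minimal standalone sketch of that loop in plain TF 1.x-compat style; the placeholders, variable, and constants are hypothetical stand-ins, not the garage FirstOrderOptimizer API:

import numpy as np
import tensorflow as tf

tf.compat.v1.disable_eager_execution()
x_ph = tf.compat.v1.placeholder(tf.float32, (None, 3))
y_ph = tf.compat.v1.placeholder(tf.float32, (None, 1))
w = tf.compat.v1.get_variable('w', (3, 1))
loss = tf.reduce_mean((tf.matmul(x_ph, w) - y_ph) ** 2)
train_op = tf.compat.v1.train.AdamOptimizer(1e-2).minimize(loss)

xs = np.random.randn(256, 3).astype(np.float32)
ys = xs @ np.array([[1.0], [-2.0], [0.5]], dtype=np.float32)

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    last = np.inf
    for epoch in range(100):
        for start in range(0, len(xs), 32):           # mini-batch pass
            batch = slice(start, start + 32)
            sess.run(train_op, {x_ph: xs[batch], y_ph: ys[batch]})
        new = sess.run(loss, {x_ph: xs, y_ph: ys})    # full-batch loss
        if abs(last - new) < 1e-6:                    # tolerance-based early stop
            break
        last = new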
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = self.__dict__.copy()
del new_dict['_opt_fun']
del new_dict['_tf_optimizer']
del new_dict['_train_op']
del new_dict['_input_vars']
return new_dict
|
Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/first_order_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/first_order_optimizer.py
|
MIT
|
def update_opt(self,
loss,
target,
inputs,
extra_inputs=None,
name='LBFGSOptimizer',
**kwargs):
"""Construct operation graph for the optimizer.
Args:
loss (tf.Tensor): Loss objective to minimize.
target (object): Target object to optimize. The object should
implement `get_params()` and `get_param_values()`.
inputs (list[tf.Tensor]): List of input placeholders.
extra_inputs (list[tf.Tensor]): List of extra input placeholders.
name (str): Name scope.
kwargs (dict): Extra unused keyword arguments. Some optimizers
have extra input, e.g. KL constraint.
"""
del kwargs
self._target = target
params = target.get_params()
with tf.name_scope(name):
def get_opt_output():
"""Helper function to construct graph.
Returns:
list[tf.Tensor]: Loss and gradient tensor.
"""
with tf.name_scope('get_opt_output'):
flat_grad = flatten_tensor_variables(
tf.gradients(loss, params))
return [
tf.cast(loss, tf.float64),
tf.cast(flat_grad, tf.float64)
]
if extra_inputs is None:
extra_inputs = list()
self._opt_fun = LazyDict(
f_loss=lambda: compile_function(inputs + extra_inputs, loss),
f_opt=lambda: compile_function(
inputs=inputs + extra_inputs,
outputs=get_opt_output(),
))
|
Construct operation graph for the optimizer.
Args:
loss (tf.Tensor): Loss objective to minimize.
target (object): Target object to optimize. The object should
implement `get_params()` and `get_param_values()`.
inputs (list[tf.Tensor]): List of input placeholders.
extra_inputs (list[tf.Tensor]): List of extra input placeholders.
name (str): Name scope.
kwargs (dict): Extra unused keyword arguments. Some optimizers
have extra input, e.g. KL constraint.
|
update_opt
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/lbfgs_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/lbfgs_optimizer.py
|
MIT
|
def get_opt_output():
"""Helper function to construct graph.
Returns:
list[tf.Tensor]: Loss and gradient tensor.
"""
with tf.name_scope('get_opt_output'):
flat_grad = flatten_tensor_variables(
tf.gradients(loss, params))
return [
tf.cast(loss, tf.float64),
tf.cast(flat_grad, tf.float64)
]
|
Helper function to construct graph.
Returns:
list[tf.Tensor]: Loss and gradient tensor.
|
get_opt_output
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/lbfgs_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/lbfgs_optimizer.py
|
MIT
|
def loss(self, inputs, extra_inputs=None):
"""The loss.
Args:
inputs (list[numpy.ndarray]): List of input values.
extra_inputs (list[numpy.ndarray]): List of extra input values.
Returns:
float: Loss.
Raises:
Exception: If loss function is None, i.e. not defined.
"""
if self._opt_fun is None:
raise Exception(
'Use update_opt() to setup the loss function first.')
if extra_inputs is None:
extra_inputs = list()
return self._opt_fun['f_loss'](*(list(inputs) + list(extra_inputs)))
|
The loss.
Args:
inputs (list[numpy.ndarray]): List of input values.
extra_inputs (list[numpy.ndarray]): List of extra input values.
Returns:
float: Loss.
Raises:
Exception: If loss function is None, i.e. not defined.
|
loss
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/lbfgs_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/lbfgs_optimizer.py
|
MIT
|
def optimize(self, inputs, extra_inputs=None, name='optimize'):
"""Perform optimization.
Args:
inputs (list[numpy.ndarray]): List of input values.
extra_inputs (list[numpy.ndarray]): List of extra input values.
name (str): Name scope.
Raises:
Exception: If loss function is None, i.e. not defined.
"""
if self._opt_fun is None:
raise Exception(
'Use update_opt() to setup the loss function first.')
with tf.name_scope(name):
f_opt = self._opt_fun['f_opt']
if extra_inputs is None:
extra_inputs = list()
def f_opt_wrapper(flat_params):
"""Helper function to set parameters values.
Args:
flat_params (numpy.ndarray): Flattened parameter values.
Returns:
list[tf.Tensor]: Loss and gradient tensor.
"""
self._target.set_param_values(flat_params)
ret = f_opt(*inputs)
return ret
itr = [0]
start_time = time.time()
if self._callback:
def opt_callback(params):
"""Callback function wrapper.
Args:
params (numpy.ndarray): Parameters.
"""
loss = self._opt_fun['f_loss'](*(inputs + extra_inputs))
elapsed = time.time() - start_time
self._callback(
dict(
loss=loss,
params=params,
itr=itr[0],
elapsed=elapsed,
))
itr[0] += 1
else:
opt_callback = None
scipy.optimize.fmin_l_bfgs_b(
func=f_opt_wrapper,
x0=self._target.get_param_values(),
maxiter=self._max_opt_itr,
callback=opt_callback,
)
|
Perform optimization.
Args:
inputs (list[numpy.ndarray]): List of input values.
extra_inputs (list[numpy.ndarray]): List of extra input values.
name (str): Name scope.
Raises:
Exception: If loss function is None, i.e. not defined.
|
optimize
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/lbfgs_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/lbfgs_optimizer.py
|
MIT
|
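optimize() above hands scipy a wrapper that sets the flat parameters and returns the loss and flattened gradient as float64. A standalone sketch of the same scipy.optimize.fmin_l_bfgs_b calling convention on a toy quadratic; everything here is a stand-in for the garage target/opt_fun machinery:

import numpy as np
import scipy.optimize

A = np.array([[3.0, 0.2], [0.2, 1.0]])
b = np.array([1.0, -2.0])

def f_opt(flat_params):
    # Return (loss, flat gradient) in float64, as f_opt_wrapper does above.
    loss = 0.5 * flat_params @ A @ flat_params - b @ flat_params
    grad = A @ flat_params - b
    return np.float64(loss), grad.astype(np.float64)

x_opt, loss_opt, info = scipy.optimize.fmin_l_bfgs_b(
    func=f_opt, x0=np.zeros(2), maxiter=20)
assert np.allclose(A @ x_opt, b, atol=1e-5)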
def f_opt_wrapper(flat_params):
"""Helper function to set parameters values.
Args:
flat_params (numpy.ndarray): Flattened parameter values.
Returns:
list[tf.Tensor]: Loss and gradient tensor.
"""
self._target.set_param_values(flat_params)
ret = f_opt(*inputs)
return ret
|
Helper function to set parameter values.
Args:
flat_params (numpy.ndarray): Flattened parameter values.
Returns:
list[tf.Tensor]: Loss and gradient tensor.
|
f_opt_wrapper
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/lbfgs_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/lbfgs_optimizer.py
|
MIT
|
def opt_callback(params):
"""Callback function wrapper.
Args:
params (numpy.ndarray): Parameters.
"""
loss = self._opt_fun['f_loss'](*(inputs + extra_inputs))
elapsed = time.time() - start_time
self._callback(
dict(
loss=loss,
params=params,
itr=itr[0],
elapsed=elapsed,
))
itr[0] += 1
|
Callback function wrapper.
Args:
params (numpy.ndarray): Parameters.
|
opt_callback
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/lbfgs_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/lbfgs_optimizer.py
|
MIT
|
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = self.__dict__.copy()
del new_dict['_opt_fun']
return new_dict
|
Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/lbfgs_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/lbfgs_optimizer.py
|
MIT
|
def update_opt(self,
loss,
target,
leq_constraint,
inputs,
constraint_name='constraint',
name='PenaltyLBFGSOptimizer',
**kwargs):
"""Construct operation graph for the optimizer.
Args:
loss (tf.Tensor): Loss objective to minimize.
target (object): Target object to optimize. The object should
implement `get_params()` and `get_param_values()`.
leq_constraint (tuple): It contains a tf.Tensor and a float value.
The tf.Tensor represents the constraint term, and the float
value is the constraint value.
inputs (list[tf.Tensor]): List of input placeholders.
constraint_name (str): Constraint name for logging.
name (str): Name scope.
kwargs (dict): Extra unused keyword arguments. Some optimizers
have extra input, e.g. KL constraint.
"""
del kwargs
params = target.get_params()
with tf.name_scope(name):
constraint_term, constraint_value = leq_constraint
penalty_var = tf.compat.v1.placeholder(tf.float32,
tuple(),
name='penalty')
penalized_loss = loss + penalty_var * constraint_term
self._target = target
self._max_constraint_val = constraint_value
self._constraint_name = constraint_name
def get_opt_output():
"""Helper function to construct graph.
Returns:
list[tf.Tensor]: Penalized loss and gradient tensor.
"""
with tf.name_scope('get_opt_output'):
grads = tf.gradients(penalized_loss, params)
for idx, (grad, param) in enumerate(zip(grads, params)):
if grad is None:
grads[idx] = tf.zeros_like(param)
flat_grad = flatten_tensor_variables(grads)
return [
tf.cast(penalized_loss, tf.float64),
tf.cast(flat_grad, tf.float64),
]
self._opt_fun = LazyDict(
f_loss=lambda: compile_function(inputs, loss),
f_constraint=lambda: compile_function(inputs, constraint_term),
f_penalized_loss=lambda: compile_function(
inputs=inputs + [penalty_var],
outputs=[penalized_loss, loss, constraint_term],
),
f_opt=lambda: compile_function(
inputs=inputs + [penalty_var],
outputs=get_opt_output(),
))
|
Construct operation graph for the optimizer.
Args:
loss (tf.Tensor): Loss objective to minimize.
target (object): Target object to optimize. The object should
implement `get_params()` and `get_param_values()`.
leq_constraint (tuple): It contains a tf.Tensor and a float value.
The tf.Tensor represents the constraint term, and the float
value is the constraint value.
inputs (list[tf.Tensor]): List of input placeholders.
constraint_name (str): Constraint name for logging.
name (str): Name scope.
kwargs (dict): Extra unused keyword arguments. Some optimizers
have extra input, e.g. KL constraint.
|
update_opt
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/penalty_lbfgs_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/penalty_lbfgs_optimizer.py
|
MIT
|
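update_opt() above builds the penalized objective loss + penalty * constraint, feeding the penalty through a placeholder so the same graph can be re-evaluated at different penalty values. A minimal runnable sketch of that construction; the tensors and data below are hypothetical stand-ins, not the garage API:

import numpy as np
import tensorflow as tf

tf.compat.v1.disable_eager_execution()
x_ph = tf.compat.v1.placeholder(tf.float32, (None, 2), name='x')
theta = tf.compat.v1.get_variable('theta', (2,),
                                  initializer=tf.compat.v1.ones_initializer())
loss = tf.reduce_mean(tf.reduce_sum((x_ph - theta) ** 2, axis=1))
constraint_term = tf.reduce_sum(theta ** 2)        # e.g. a KL-like penalty term
penalty_var = tf.compat.v1.placeholder(tf.float32, (), name='penalty')
penalized_loss = loss + penalty_var * constraint_term

xs = np.zeros((4, 2), dtype=np.float32)
with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    # Same graph, evaluated at two penalty values via the placeholder.
    for p in (0.1, 10.0):
        print(sess.run(penalized_loss, {x_ph: xs, penalty_var: p}))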
def get_opt_output():
"""Helper function to construct graph.
Returns:
list[tf.Tensor]: Penalized loss and gradient tensor.
"""
with tf.name_scope('get_opt_output'):
grads = tf.gradients(penalized_loss, params)
for idx, (grad, param) in enumerate(zip(grads, params)):
if grad is None:
grads[idx] = tf.zeros_like(param)
flat_grad = flatten_tensor_variables(grads)
return [
tf.cast(penalized_loss, tf.float64),
tf.cast(flat_grad, tf.float64),
]
|
Helper function to construct graph.
Returns:
list[tf.Tensor]: Penalized loss and gradient tensor.
|
get_opt_output
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/penalty_lbfgs_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/penalty_lbfgs_optimizer.py
|
MIT
|
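get_opt_output() above replaces None gradients with zeros because tf.gradients returns None for variables that are disconnected from the penalized loss. A small sketch of that behavior and the fix, using hypothetical variable names:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()
a = tf.compat.v1.get_variable('a', ())
unused = tf.compat.v1.get_variable('unused', ())
loss = a * a
grads = tf.gradients(loss, [a, unused])        # second entry is None (disconnected)
grads = [g if g is not None else tf.zeros_like(v)
         for g, v in zip(grads, [a, unused])]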
def loss(self, inputs):
"""The loss.
Args:
inputs (list[numpy.ndarray]): List of input values.
Returns:
float: Loss.
Raises:
Exception: If loss function is None, i.e. not defined.
"""
if self._opt_fun is None:
raise Exception(
'Use update_opt() to setup the loss function first.')
return self._opt_fun['f_loss'](*inputs)
|
The loss.
Args:
inputs (list[numpy.ndarray]): List of input values.
Returns:
float: Loss.
Raises:
Exception: If loss function is None, i.e. not defined.
|
loss
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/penalty_lbfgs_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/penalty_lbfgs_optimizer.py
|
MIT
|
def constraint_val(self, inputs):
"""The constraint value.
Args:
inputs (list[numpy.ndarray]): List of input values.
Returns:
float: Constraint value.
Raises:
Exception: If loss function is None, i.e. not defined.
"""
if self._opt_fun is None:
raise Exception(
'Use update_opt() to setup the loss function first.')
return self._opt_fun['f_constraint'](*inputs)
|
The constraint value.
Args:
inputs (list[numpy.ndarray]): List of input values.
Returns:
float: Constraint value.
Raises:
Exception: If loss function is None, i.e. not defined.
|
constraint_val
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/penalty_lbfgs_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/penalty_lbfgs_optimizer.py
|
MIT
|
def optimize(self, inputs, name='optimize'):
"""Perform optimization.
Args:
inputs (list[numpy.ndarray]): List of input values.
name (str): Name scope.
Raises:
Exception: If loss function is None, i.e. not defined.
"""
if self._opt_fun is None:
raise Exception(
'Use update_opt() to setup the loss function first.')
with tf.name_scope(name):
inputs = tuple(inputs)
try_penalty = np.clip(self._penalty, self._min_penalty,
self._max_penalty)
penalty_scale_factor = None
f_opt = self._opt_fun['f_opt']
f_penalized_loss = self._opt_fun['f_penalized_loss']
def gen_f_opt(penalty): # noqa: D202
"""Return a function that set parameters values.
Args:
penalty (float): Penalty.
Returns:
callable: Function that set parameters values.
"""
def f(flat_params):
"""Helper function to set parameters values.
Args:
flat_params (numpy.ndarray): Flatten parameter values.
Returns:
list[tf.Tensor]: Penalized loss and gradient tensor.
"""
self._target.set_param_values(flat_params)
return f_opt(*(inputs + (penalty, )))
return f
cur_params = self._target.get_param_values().astype('float64')
opt_params = cur_params
for penalty_itr in range(self._max_penalty_itr):
logger.log('trying penalty=%.3f...' % try_penalty)
itr_opt_params, _, _ = scipy.optimize.fmin_l_bfgs_b(
func=gen_f_opt(try_penalty),
x0=cur_params,
maxiter=self._max_opt_itr)
_, try_loss, try_constraint_val = f_penalized_loss(*(
inputs + (try_penalty, )))
logger.log('penalty %f => loss %f, %s %f' %
(try_penalty, try_loss, self._constraint_name,
try_constraint_val))
# Either constraint satisfied, or we are at the last iteration
# already and no alternative parameter satisfies the constraint
if try_constraint_val < self._max_constraint_val or \
(penalty_itr == self._max_penalty_itr - 1 and
opt_params is None):
opt_params = itr_opt_params
if not self._adapt_penalty:
break
# Decide scale factor on the first iteration, or if constraint
# violation yields numerical error
if (penalty_scale_factor is None
or np.isnan(try_constraint_val)):
# Increase penalty if constraint violated, or if constraint
# term is NAN
if (try_constraint_val > self._max_constraint_val
or np.isnan(try_constraint_val)):
penalty_scale_factor = self._increase_penalty_factor
else:
# Otherwise (i.e. constraint satisfied), shrink penalty
penalty_scale_factor = self._decrease_penalty_factor
opt_params = itr_opt_params
else:
if (penalty_scale_factor > 1 and
try_constraint_val <= self._max_constraint_val):
break
if (penalty_scale_factor < 1 and
try_constraint_val >= self._max_constraint_val):
break
try_penalty *= penalty_scale_factor
try_penalty = np.clip(try_penalty, self._min_penalty,
self._max_penalty)
self._penalty = try_penalty
self._target.set_param_values(opt_params)
|
Perform optimization.
Args:
inputs (list[numpy.ndarray]): List of input values.
name (str): Name scope.
Raises:
Exception: If loss function is None, i.e. not defined.
|
optimize
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/penalty_lbfgs_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/penalty_lbfgs_optimizer.py
|
MIT
|
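The penalty-adaptation loop above can be illustrated on a problem solvable in closed form: minimize (x - 2)^2 subject to x^2 <= 1. The penalized minimizer of (x - 2)^2 + p * x^2 is x = 2 / (1 + p), so raising the penalty pushes the solution toward feasibility. A numpy sketch, not the garage API:

import numpy as np

max_constraint_val = 1.0
penalty, scale = 1e-3, 2.0                   # initial penalty, increase factor
opt_x = None
for _ in range(30):                          # stand-in for max_penalty_itr
    x = 2.0 / (1.0 + penalty)                # argmin of (x - 2)^2 + penalty * x^2
    constraint_val = x ** 2
    if constraint_val <= max_constraint_val:
        opt_x = x
        break
    penalty *= scale                         # constraint violated: raise penalty
assert opt_x is not None and opt_x ** 2 <= 1.0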
def gen_f_opt(penalty): # noqa: D202
"""Return a function that set parameters values.
Args:
penalty (float): Penalty.
Returns:
callable: Function that set parameters values.
"""
def f(flat_params):
"""Helper function to set parameters values.
Args:
flat_params (numpy.ndarray): Flatten parameter values.
Returns:
list[tf.Tensor]: Penalized loss and gradient tensor.
"""
self._target.set_param_values(flat_params)
return f_opt(*(inputs + (penalty, )))
return f
|
Return a function that sets parameter values.
Args:
penalty (float): Penalty.
Returns:
callable: Function that sets parameter values.
|
gen_f_opt
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/penalty_lbfgs_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/penalty_lbfgs_optimizer.py
|
MIT
|
def f(flat_params):
"""Helper function to set parameters values.
Args:
flat_params (numpy.ndarray): Flatten parameter values.
Returns:
list[tf.Tensor]: Penalized loss and gradient tensor.
"""
self._target.set_param_values(flat_params)
return f_opt(*(inputs + (penalty, )))
|
Helper function to set parameter values.
Args:
flat_params (numpy.ndarray): Flattened parameter values.
Returns:
list[tf.Tensor]: Penalized loss and gradient tensor.
|
f
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/penalty_lbfgs_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/penalty_lbfgs_optimizer.py
|
MIT
|
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = self.__dict__.copy()
del new_dict['_opt_fun']
return new_dict
|
Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/penalty_lbfgs_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/penalty_lbfgs_optimizer.py
|
MIT
|