code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def __getstate__(self):
"""Refuse to be pickled.
Raises:
ValueError: Always raised, since pickling Workers is not supported.
"""
raise ValueError('Workers are not pickleable. '
'Please pickle the WorkerFactory instead.')
|
Refuse to be pickled.
Raises:
ValueError: Always raised, since pickling Workers is not supported.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/sampler/worker.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/sampler/worker.py
|
MIT
|
def prepare_worker_messages(self, objs, preprocess=identity_function):
"""Take an argument and canonicalize it into a list for all workers.
This helper function is used to handle arguments in the sampler API
which may (optionally) be lists. Specifically, these are agent, env,
agent_update, and env_update. Checks that the number of parameters is
correct.
Args:
objs(object or list): Must be either a single object or a list
of length n_workers.
preprocess(function): Function to call on each single object before
creating the list.
Raises:
ValueError: If a list is passed of a length other than `n_workers`.
Returns:
List[object]: A list of length self.n_workers.
"""
if isinstance(objs, list):
if len(objs) != self.n_workers:
raise ValueError(
'Length of list doesn\'t match number of workers')
return [preprocess(obj) for obj in objs]
else:
return [preprocess(objs) for _ in range(self.n_workers)]
|
Take an argument and canonicalize it into a list for all workers.
This helper function is used to handle arguments in the sampler API
which may (optionally) be lists. Specifically, these are agent, env,
agent_update, and env_update. Checks that the number of parameters is
correct.
Args:
objs(object or list): Must be either a single object or a list
of length n_workers.
preprocess(function): Function to call on each single object before
creating the list.
Raises:
ValueError: If a list is passed of a length other than `n_workers`.
Returns:
List[object]: A list of length self.n_workers.
|
prepare_worker_messages
|
python
|
rlworkgroup/garage
|
src/garage/sampler/worker_factory.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/sampler/worker_factory.py
|
MIT
|
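The docstring above describes a simple broadcast-or-validate rule: a list must already have one entry per worker, while a single object is copied to every worker. A minimal, self-contained sketch of that behavior (the `_Factory` class and demo values are hypothetical; only `n_workers`, `identity_function`, and the method logic mirror the garage source):

```python
def identity_function(value):
    """Return the argument unchanged (stand-in for garage's identity_function)."""
    return value


class _Factory:
    """Hypothetical holder for n_workers, just to exercise the method."""

    def __init__(self, n_workers):
        self.n_workers = n_workers

    def prepare_worker_messages(self, objs, preprocess=identity_function):
        # A list must already have one entry per worker.
        if isinstance(objs, list):
            if len(objs) != self.n_workers:
                raise ValueError("Length of list doesn't match number of workers")
            return [preprocess(obj) for obj in objs]
        # A single object is broadcast to every worker.
        return [preprocess(objs) for _ in range(self.n_workers)]


factory = _Factory(n_workers=3)
print(factory.prepare_worker_messages('env_update'))    # ['env_update'] * 3
print(factory.prepare_worker_messages(['a', 'b', 'c']))  # one entry per worker
```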
def __call__(self, worker_number):
"""Construct a worker given its number.
Args:
worker_number(int): The worker number. Should be at least 0 and
less than `n_workers`.
Raises:
ValueError: If the worker number is greater than or equal to
`n_workers`.
Returns:
garage.sampler.Worker: The constructed worker.
"""
if worker_number >= self.n_workers:
raise ValueError('Worker number is too big')
return self._worker_class(worker_number=worker_number,
seed=self._seed,
max_episode_length=self._max_episode_length,
**self._worker_args)
|
Construct a worker given its number.
Args:
worker_number(int): The worker number. Should be at least 0 and
less than `n_workers`.
Raises:
ValueError: If the worker number is greater than or equal to
`n_workers`.
Returns:
garage.sampler.Worker: The constructed worker.
|
__call__
|
python
|
rlworkgroup/garage
|
src/garage/sampler/worker_factory.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/sampler/worker_factory.py
|
MIT
|
def step(self, action, agent_info):
"""Step the episode using an action from an agent.
Args:
action (np.ndarray): The action taken by the agent.
agent_info (dict[str, np.ndarray]): Extra agent information.
Returns:
np.ndarray: The new observation from the environment.
"""
es = self.env.step(action)
self.observations.append(es.observation)
self.rewards.append(es.reward)
self.actions.append(es.action)
for k, v in agent_info.items():
self.agent_infos[k].append(v)
for k, v in es.env_info.items():
self.env_infos[k].append(v)
self.step_types.append(es.step_type)
return es.observation
|
Step the episode using an action from an agent.
Args:
action (np.ndarray): The action taken by the agent.
agent_info (dict[str, np.ndarray]): Extra agent information.
Returns:
np.ndarray: The new observation from the environment.
|
step
|
python
|
rlworkgroup/garage
|
src/garage/sampler/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/sampler/_dtypes.py
|
MIT
|
def to_batch(self):
"""Convert this in-progress episode into a EpisodeBatch.
Returns:
EpisodeBatch: This episode as a batch.
Raises:
AssertionError: If this episode contains no time steps.
"""
assert len(self.rewards) > 0
env_infos = dict(self.env_infos)
agent_infos = dict(self.agent_infos)
episode_infos = dict(self.episode_info)
for k, v in env_infos.items():
env_infos[k] = np.asarray(v)
for k, v in agent_infos.items():
agent_infos[k] = np.asarray(v)
for k, v in episode_infos.items():
episode_infos[k] = np.asarray([v])
return EpisodeBatch(episode_infos=episode_infos,
env_spec=self.env.spec,
observations=np.asarray(self.observations[:-1]),
last_observations=np.asarray([self.last_obs]),
actions=np.asarray(self.actions),
rewards=np.asarray(self.rewards),
step_types=np.asarray(self.step_types,
dtype=StepType),
env_infos=env_infos,
agent_infos=agent_infos,
lengths=np.asarray([len(self.rewards)], dtype='l'))
|
Convert this in-progress episode into an EpisodeBatch.
Returns:
EpisodeBatch: This episode as a batch.
Raises:
AssertionError: If this episode contains no time steps.
|
to_batch
|
python
|
rlworkgroup/garage
|
src/garage/sampler/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/sampler/_dtypes.py
|
MIT
|
def _apply_env_update(old_env, env_update):
"""Use any non-None env_update as a new environment.
A simple env update function. If env_update is not None, it should be
the complete new environment.
This allows changing environments by passing the new environment as
`env_update` into `obtain_samples`.
Args:
old_env (Environment): Environment to be updated.
env_update (Environment or EnvUpdate or None): The environment to
replace the existing env with. Note that other implementations
of `Worker` may take different types for this parameter.
Returns:
Environment: The updated environment (may be a different object from
`old_env`).
bool: True if an update happened.
Raises:
TypeError: If env_update is not one of the documented types.
"""
if env_update is not None:
if isinstance(env_update, EnvUpdate):
return env_update(old_env), True
elif isinstance(env_update, Environment):
if old_env is not None:
old_env.close()
return env_update, True
else:
raise TypeError('Unknown environment update type.')
else:
return old_env, False
|
Use any non-None env_update as a new environment.
A simple env update function. If env_update is not None, it should be
the complete new environment.
This allows changing environments by passing the new environment as
`env_update` into `obtain_samples`.
Args:
old_env (Environment): Environment to be updated.
env_update (Environment or EnvUpdate or None): The environment to
replace the existing env with. Note that other implementations
of `Worker` may take different types for this parameter.
Returns:
Environment: The updated environment (may be a different object from
`old_env`).
bool: True if an update happened.
Raises:
TypeError: If env_update is not one of the documented types.
|
_apply_env_update
|
python
|
rlworkgroup/garage
|
src/garage/sampler/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/sampler/_functions.py
|
MIT
|
def compile_function(inputs, outputs):
"""Compiles a tensorflow function using the current session.
Args:
inputs (list[tf.Tensor]): Inputs to the function. Can be a list of
inputs or just one.
outputs (list[tf.Tensor]): Outputs of the function. Can be a list of
outputs or just one.
Returns:
Callable: Compiled TensorFlow function.
"""
def _run(*input_vals):
# pylint: disable=missing-return-doc, missing-return-type-doc
sess = tf.compat.v1.get_default_session()
return sess.run(outputs, feed_dict=dict(list(zip(inputs, input_vals))))
return _run
|
Compiles a tensorflow function using the current session.
Args:
inputs (list[tf.Tensor]): Inputs to the function. Can be a list of
inputs or just one.
outputs (list[tf.Tensor]): Outputs of the function. Can be a list of
outputs or just one.
Returns:
Callable: Compiled TensorFlow function.
|
compile_function
|
python
|
rlworkgroup/garage
|
src/garage/tf/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/_functions.py
|
MIT
|
def get_target_ops(variables, target_variables, tau=None):
"""Get target variables update operations.
In RL algorithms we often update target network every n
steps. This function returns the tf.Operation for updating
target variables (denoted by target_var) from variables
(denoted by var) with fraction tau. In other words, each update
sets target_var to tau * var + (1 - tau) * target_var.
Args:
variables (list[tf.Variable]): Source variables for update.
target_variables (list[tf.Variable]): Target variables to
be updated.
tau (float): Fraction to update. Set it to be None for
hard-update.
Returns:
list[tf.Operation]: Operations for initializing the target variables.
list[tf.Operation]: Operations for updating the target variables with
fraction tau (only returned when tau is not None).
"""
update_ops = []
init_ops = []
assert len(variables) == len(target_variables)
for var, target_var in zip(variables, target_variables):
init_ops.append(tf.compat.v1.assign(target_var, var))
if tau is not None:
update_ops.append(
tf.compat.v1.assign(target_var,
tau * var + (1.0 - tau) * target_var))
if tau is not None:
return init_ops, update_ops
else:
return init_ops
|
Get target variables update operations.
In RL algorithms we often update target network every n
steps. This function returns the tf.Operation for updating
target variables (denoted by target_var) from variables
(denoted by var) with fraction tau. In other words, each update
sets target_var to tau * var + (1 - tau) * target_var.
Args:
variables (list[tf.Variable]): Source variables for update.
target_variables (list[tf.Variable]): Target variables to
be updated.
tau (float): Fraction to update. Set it to be None for
hard-update.
Returns:
list[tf.Operation]: Operations for initializing the target variables.
list[tf.Operation]: Operations for updating the target variables with
fraction tau (only returned when tau is not None).
|
get_target_ops
|
python
|
rlworkgroup/garage
|
src/garage/tf/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/_functions.py
|
MIT
|
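The update these ops implement is the usual soft ("Polyak") target update, target_var <- tau * var + (1 - tau) * target_var, with a hard copy when tau is None. A NumPy sketch of the same arithmetic, as an illustrative analogue rather than the TensorFlow implementation:

```python
import numpy as np

def soft_update(variables, target_variables, tau):
    """Blend each source variable into its target: target <- tau*var + (1-tau)*target."""
    for var, target_var in zip(variables, target_variables):
        target_var[...] = tau * var + (1.0 - tau) * target_var

qf_vars = [np.array([1.0, 2.0]), np.array([3.0])]
target_vars = [np.zeros(2), np.zeros(1)]

soft_update(qf_vars, target_vars, tau=0.05)  # small step toward the online network
print(target_vars)                           # [array([0.05, 0.1]), array([0.15])]

soft_update(qf_vars, target_vars, tau=1.0)   # tau=1.0 behaves like a hard copy
print(target_vars)                           # [array([1., 2.]), array([3.])]
```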
def flatten_batch_dict(d, name='flatten_batch_dict'):
"""Flatten a batch of observations represented as a dict.
Args:
d (dict[tf.Tensor]): A dict of Tensors to flatten.
name (string): The name of the operation ('flatten_batch_dict' by default).
Returns:
dict[tf.Tensor]: A dict with flattened tensors.
"""
with tf.name_scope(name):
return {k: flatten_batch(v) for k, v in d.items()}
|
Flatten a batch of observations represented as a dict.
Args:
d (dict[tf.Tensor]): A dict of Tensors to flatten.
name (string): The name of the operation ('flatten_batch_dict' by default).
Returns:
dict[tf.Tensor]: A dict with flattened tensors.
|
flatten_batch_dict
|
python
|
rlworkgroup/garage
|
src/garage/tf/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/_functions.py
|
MIT
|
def filter_valids(t, valid, name='filter_valids'):
"""Filter out tensor using valid array.
Args:
t (tf.Tensor): The tensor to filter.
valid (list[float]): Array of 0/1 values, one per entry of `t`,
marking which entries are valid.
name (string): Name of the operation.
Returns:
tf.Tensor: Filtered Tensor.
"""
# Must round before cast to prevent floating-error
return tf.dynamic_partition(t,
tf.cast(tf.round(valid), tf.int32),
2,
name=name)[1]
|
Filter out tensor using valid array.
Args:
t (tf.Tensor): The tensor to filter.
valid (list[float]): Array of 0/1 values, one per entry of `t`,
marking which entries are valid.
name (string): Name of the operation.
Returns:
tf.Tensor: Filtered Tensor.
|
filter_valids
|
python
|
rlworkgroup/garage
|
src/garage/tf/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/_functions.py
|
MIT
|
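In eager/NumPy terms, `filter_valids` is boolean masking with a 0/1 validity vector; a rough NumPy equivalent (ignoring graph-mode details such as `tf.dynamic_partition`):

```python
import numpy as np

def filter_valids_np(t, valid):
    """Keep only the rows of `t` whose validity flag rounds to 1."""
    mask = np.round(np.asarray(valid)).astype(bool)
    return np.asarray(t)[mask]

t = np.array([[1., 1.], [2., 2.], [3., 3.]])
valid = [1., 0., 1.]
print(filter_valids_np(t, valid))  # [[1. 1.] [3. 3.]]
```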
def filter_valids_dict(d, valid, name='filter_valids_dict'):
"""Filter valid values on a dict.
Args:
d (dict[tf.Tensor]): Dict of tensors to be filtered.
valid (list[float]): Array of 0/1 values marking which entries
are valid.
name (string): Name of the operation ('filter_valids_dict' by default).
Returns:
dict[tf.Tensor]: Dict with filtered tensors.
"""
with tf.name_scope(name):
return {k: filter_valids(v, valid) for k, v in d.items()}
|
Filter valid values on a dict.
Args:
d (dict[tf.Tensor]): Dict of tensors to be filtered.
valid (list[float]): Array of 0/1 values marking which entries
are valid.
name (string): Name of the operation ('filter_valids_dict' by default).
Returns:
dict[tf.Tensor]: Dict with filtered tensors.
|
filter_valids_dict
|
python
|
rlworkgroup/garage
|
src/garage/tf/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/_functions.py
|
MIT
|
def flatten_inputs(deep):
"""Flattens an :class:`collections.abc.Iterable` recursively.
Args:
deep (Iterable): An :class:`~collections.abc.Iterable` to flatten.
Returns:
list: The flattened result.
"""
def flatten(deep):
# pylint: disable=missing-yield-doc,missing-yield-type-doc
for d in deep:
if isinstance(d, collections.abc.Iterable) and not isinstance(
d, (str, bytes, tf.Tensor, np.ndarray)):
yield from flatten(d)
else:
yield d
return list(flatten(deep))
|
Flattens an :class:`collections.abc.Iterable` recursively.
Args:
deep (Iterable): An :class:`~collections.abc.Iterable` to flatten.
Returns:
list: The flattened result.
|
flatten_inputs
|
python
|
rlworkgroup/garage
|
src/garage/tf/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/_functions.py
|
MIT
|
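A condensed, pure-Python sketch of the recursive flattening described above; unlike the garage version it omits the `tf.Tensor`/`np.ndarray` leaf cases for brevity:

```python
from collections import abc

def flatten_inputs_sketch(deep):
    """Recursively flatten nested iterables, treating str/bytes as leaves.

    Simplified sketch: the garage version also treats tf.Tensor and
    np.ndarray as leaves so they are not iterated element-wise.
    """
    def flatten(items):
        for d in items:
            if isinstance(d, abc.Iterable) and not isinstance(d, (str, bytes)):
                yield from flatten(d)
            else:
                yield d
    return list(flatten(deep))

print(flatten_inputs_sketch([1, [2, (3, 4)], 'obs', [[5]]]))
# [1, 2, 3, 4, 'obs', 5]
```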
def new_tensor(name, ndim, dtype):
"""Creates a placeholder :class:`tf.Tensor` with the specified arguments.
Args:
name (string): Name of the tf.Tensor.
ndim (int): Number of dimensions of the tf.Tensor.
dtype (type): Data type of the tf.Tensor's contents.
Returns:
tf.Tensor: Placeholder tensor.
"""
return tf.compat.v1.placeholder(dtype=dtype,
shape=[None] * ndim,
name=name)
|
Creates a placeholder :class:`tf.Tensor` with the specified arguments.
Args:
name (string): Name of the tf.Tensor.
ndim (int): Number of dimensions of the tf.Tensor.
dtype (type): Data type of the tf.Tensor's contents.
Returns:
tf.Tensor: Placeholder tensor.
|
new_tensor
|
python
|
rlworkgroup/garage
|
src/garage/tf/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/_functions.py
|
MIT
|
def concat_tensor_dict_list(tensor_dict_list):
"""Concatenates a dict of tensors lists.
Each list of tensors gets concatenated into one tensor.
Args:
tensor_dict_list (dict[list[ndarray]]): Dict with lists of tensors.
Returns:
dict[ndarray]: A dict with the concatenated tensors.
"""
keys = list(tensor_dict_list[0].keys())
ret = dict()
for k in keys:
example = tensor_dict_list[0][k]
if isinstance(example, dict):
v = concat_tensor_dict_list([x[k] for x in tensor_dict_list])
else:
v = concat_tensor_list([x[k] for x in tensor_dict_list])
ret[k] = v
return ret
|
Concatenates a dict of tensor lists.
Each list of tensors gets concatenated into one tensor.
Args:
tensor_dict_list (dict[list[ndarray]]): Dict with lists of tensors.
Returns:
dict[ndarray]: A dict with the concatenated tensors.
|
concat_tensor_dict_list
|
python
|
rlworkgroup/garage
|
src/garage/tf/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/_functions.py
|
MIT
|
def stack_tensor_dict_list(tensor_dict_list):
"""Stack a list of dictionaries of {tensors or dictionary of tensors}.
Args:
tensor_dict_list (list[dict]): a list of dictionaries of {tensors or
dictionary of tensors}.
Returns:
dict: a dictionary of {stacked tensors or dictionary of stacked
tensors}.
"""
keys = list(tensor_dict_list[0].keys())
ret = dict()
for k in keys:
example = tensor_dict_list[0][k]
if isinstance(example, dict):
v = stack_tensor_dict_list([x[k] for x in tensor_dict_list])
else:
v = np.array([x[k] for x in tensor_dict_list])
ret[k] = v
return ret
|
Stack a list of dictionaries of {tensors or dictionary of tensors}.
Args:
tensor_dict_list (list[dict]): a list of dictionaries of {tensors or
dictionary of tensors}.
Returns:
dict: a dictionary of {stacked tensors or dictionary of stacked
tensors}.
|
stack_tensor_dict_list
|
python
|
rlworkgroup/garage
|
src/garage/tf/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/_functions.py
|
MIT
|
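A self-contained restatement of the function above with example data, showing how a list of per-step info dicts becomes a dict of stacked arrays (the `step_infos` values are made up for illustration):

```python
import numpy as np

def stack_tensor_dict_list(tensor_dict_list):
    """Stack a list of (possibly nested) dicts of arrays along a new leading axis."""
    ret = {}
    for k in tensor_dict_list[0]:
        example = tensor_dict_list[0][k]
        if isinstance(example, dict):
            ret[k] = stack_tensor_dict_list([d[k] for d in tensor_dict_list])
        else:
            ret[k] = np.array([d[k] for d in tensor_dict_list])
    return ret

# Per-step agent_infos-style dicts.
step_infos = [
    {'mean': np.array([0.1, 0.2]), 'hidden': {'h0': np.array([1.0])}},
    {'mean': np.array([0.3, 0.4]), 'hidden': {'h0': np.array([2.0])}},
]
stacked = stack_tensor_dict_list(step_infos)
print(stacked['mean'].shape)    # (2, 2) -- one row per time step
print(stacked['hidden']['h0'])  # stacked along a new leading axis
```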
def split_tensor_dict_list(tensor_dict):
"""Split a list of dictionaries of {tensors or dictionary of tensors}.
Args:
tensor_dict (dict): a list of dictionaries of {tensors or
dictionary of tensors}.
Returns:
dict: a dictionary of {split tensors or dictionary of split tensors}.
"""
keys = list(tensor_dict.keys())
ret = None
for k in keys:
vals = tensor_dict[k]
if isinstance(vals, dict):
vals = split_tensor_dict_list(vals)
if ret is None:
ret = [{k: v} for v in vals]
else:
for v, cur_dict in zip(vals, ret):
cur_dict[k] = v
return ret
|
Split a dictionary of {tensors or dictionary of tensors} into a list.
Args:
tensor_dict (dict): a dictionary of {tensors or dictionary of tensors}.
Returns:
list[dict]: a list of dictionaries of {split tensors or dictionary of
split tensors}, one per element along the leading axis.
|
split_tensor_dict_list
|
python
|
rlworkgroup/garage
|
src/garage/tf/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/_functions.py
|
MIT
|
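`split_tensor_dict_list` is roughly the inverse of `stack_tensor_dict_list`: a dict of batched values becomes a list of per-index dicts. A self-contained copy of the logic with hypothetical example data:

```python
import numpy as np

def split_tensor_dict_list(tensor_dict):
    """Turn a (possibly nested) dict of sequences into a list of per-index dicts."""
    ret = None
    for k, vals in tensor_dict.items():
        if isinstance(vals, dict):
            vals = split_tensor_dict_list(vals)
        if ret is None:
            ret = [{k: v} for v in vals]
        else:
            for v, cur_dict in zip(vals, ret):
                cur_dict[k] = v
    return ret

batched = {
    'mean': np.array([[0.1], [0.3]]),
    'hidden': {'h0': np.array([1.0, 2.0])},
}
per_step = split_tensor_dict_list(batched)
print(len(per_step))          # 2 -- one dict per index along the leading axis
print(per_step[0]['mean'])    # [0.1]
print(per_step[0]['hidden'])  # contains the first element of 'h0'
```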
def pad_tensor(x, max_len):
"""Pad tensors with zeros.
Args:
x (numpy.ndarray): Tensors to be padded.
max_len (int): Maximum length.
Returns:
numpy.ndarray: Padded tensor.
"""
return np.concatenate([
x,
np.tile(np.zeros_like(x[0]),
(max_len - len(x), ) + (1, ) * np.ndim(x[0]))
])
|
Pad tensors with zeros.
Args:
x (numpy.ndarray): Tensors to be padded.
max_len (int): Maximum length.
Returns:
numpy.ndarray: Padded tensor.
|
pad_tensor
|
python
|
rlworkgroup/garage
|
src/garage/tf/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/_functions.py
|
MIT
|
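A quick NumPy demonstration of the zero-padding above, using the same expression; the example arrays are arbitrary:

```python
import numpy as np

def pad_tensor(x, max_len):
    """Pad `x` along axis 0 with zeros until it has length `max_len`."""
    return np.concatenate([
        x,
        np.tile(np.zeros_like(x[0]), (max_len - len(x),) + (1,) * np.ndim(x[0])),
    ])

rewards = np.array([1.0, 1.0, 0.5])  # episode of length 3
print(pad_tensor(rewards, 5))        # [1.  1.  0.5 0.  0. ]

obs = np.ones((3, 2))                # 3 steps of 2-dim observations
print(pad_tensor(obs, 5).shape)      # (5, 2)
```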
def pad_tensor_n(xs, max_len):
"""Pad array of tensors.
Args:
xs (numpy.ndarray): Tensors to be padded.
max_len (int): Maximum length.
Returns:
numpy.ndarray: Padded tensor.
"""
ret = np.zeros((len(xs), max_len) + xs[0].shape[1:], dtype=xs[0].dtype)
for idx, x in enumerate(xs):
ret[idx][:len(x)] = x
return ret
|
Pad array of tensors.
Args:
xs (numpy.ndarray): Tensors to be padded.
max_len (int): Maximum length.
Returns:
numpy.ndarray: Padded tensor.
|
pad_tensor_n
|
python
|
rlworkgroup/garage
|
src/garage/tf/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/_functions.py
|
MIT
|
def pad_tensor_dict(tensor_dict, max_len):
"""Pad dictionary of tensors with zeros.
Args:
tensor_dict (dict[numpy.ndarray]): Tensors to be padded.
max_len (int): Maximum length.
Returns:
dict[numpy.ndarray]: Padded tensor.
"""
keys = list(tensor_dict.keys())
ret = dict()
for k in keys:
if isinstance(tensor_dict[k], dict):
ret[k] = pad_tensor_dict(tensor_dict[k], max_len)
else:
ret[k] = pad_tensor(tensor_dict[k], max_len)
return ret
|
Pad dictionary of tensors with zeros.
Args:
tensor_dict (dict[numpy.ndarray]): Tensors to be padded.
max_len (int): Maximum length.
Returns:
dict[numpy.ndarray]: Padded tensor.
|
pad_tensor_dict
|
python
|
rlworkgroup/garage
|
src/garage/tf/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/_functions.py
|
MIT
|
def compute_advantages(discount,
gae_lambda,
max_len,
baselines,
rewards,
name='compute_advantages'):
"""Calculate advantages.
Advantages are a discounted cumulative sum.
The discounted cumulative sum can be represented as an IIR
filter on the reversed input vectors, i.e.
y[t] - discount*y[t+1] = x[t], or
rev(y)[t] - discount*rev(y)[t-1] = rev(x)[t]
Given the time-domain IIR filter step response, we can
calculate the filter response to our signal by convolving the
signal with the filter response function. The time-domain IIR
step response is calculated below as discount_filter:
discount_filter = [1, discount, discount^2, ..., discount^N-1]
where the episode length is N.
We convolve discount_filter with the reversed time-domain
signal deltas to calculate the reversed advantages:
rev(advantages) = discount_filter (X) rev(deltas)
TensorFlow's tf.nn.conv1d op is not a true convolution, but
actually a cross-correlation, so its input and output are
already implicitly reversed for us.
advantages = discount_filter (tf.nn.conv1d) deltas
Args:
discount (float): Discount factor.
gae_lambda (float): Lambda, as used for Generalized Advantage
Estimation (GAE).
max_len (int): Maximum length of a single episode.
baselines (tf.Tensor): A 2D vector of value function estimates with
shape :math:`(N, T)`, where :math:`N` is the batch dimension
(number of episodes) and :math:`T` is the maximum episode length
experienced by the agent.
rewards (tf.Tensor): A 2D vector of per-step rewards with shape
:math:`(N, T)`, where :math:`N` is the batch dimension (number of
episodes) and :math:`T` is the maximum episode length experienced
by the agent.
name (string): Name of the operation.
Returns:
tf.Tensor: A 2D vector of calculated advantage values with shape
:math:`(N, T)`, where :math:`N` is the batch dimension (number of
episodes) and :math:`T` is the maximum episode length experienced
by the agent.
"""
with tf.name_scope(name):
# Prepare convolutional IIR filter to calculate advantages
gamma_lambda = tf.constant(float(discount) * float(gae_lambda),
dtype=tf.float32,
shape=[max_len, 1, 1])
advantage_filter = tf.compat.v1.cumprod(gamma_lambda,
axis=0,
exclusive=True)
# Calculate deltas
pad = tf.zeros_like(baselines[:, :1])
baseline_shift = tf.concat([baselines[:, 1:], pad], 1)
deltas = rewards + discount * baseline_shift - baselines
# Convolve deltas with the discount filter to get advantages
deltas_pad = tf.expand_dims(tf.concat(
[deltas, tf.zeros_like(deltas[:, :-1])], 1),
axis=2)
adv = tf.nn.conv1d(deltas_pad,
advantage_filter,
stride=1,
padding='VALID')
advantages = tf.reshape(adv, [-1])
return advantages
|
Calculate advantages.
Advantages are a discounted cumulative sum.
The discounted cumulative sum can be represented as an IIR
filter on the reversed input vectors, i.e.
y[t] - discount*y[t+1] = x[t], or
rev(y)[t] - discount*rev(y)[t-1] = rev(x)[t]
Given the time-domain IIR filter step response, we can
calculate the filter response to our signal by convolving the
signal with the filter response function. The time-domain IIR
step response is calculated below as discount_filter:
discount_filter = [1, discount, discount^2, ..., discount^N-1]
where the episode length is N.
We convolve discount_filter with the reversed time-domain
signal deltas to calculate the reversed advantages:
rev(advantages) = discount_filter (X) rev(deltas)
TensorFlow's tf.nn.conv1d op is not a true convolution, but
actually a cross-correlation, so its input and output are
already implicitly reversed for us.
advantages = discount_filter (tf.nn.conv1d) deltas
Args:
discount (float): Discount factor.
gae_lambda (float): Lambda, as used for Generalized Advantage
Estimation (GAE).
max_len (int): Maximum length of a single episode.
baselines (tf.Tensor): A 2D vector of value function estimates with
shape :math:`(N, T)`, where :math:`N` is the batch dimension
(number of episodes) and :math:`T` is the maximum episode length
experienced by the agent.
rewards (tf.Tensor): A 2D vector of per-step rewards with shape
:math:`(N, T)`, where :math:`N` is the batch dimension (number of
episodes) and :math:`T` is the maximum episode length experienced
by the agent.
name (string): Name of the operation.
Returns:
tf.Tensor: A 2D vector of calculated advantage values with shape
:math:`(N, T)`, where :math:`N` is the batch dimension (number of
episodes) and :math:`T` is the maximum episode length experienced
by the agent.
|
compute_advantages
|
python
|
rlworkgroup/garage
|
src/garage/tf/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/_functions.py
|
MIT
|
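The discounted IIR filter described above is equivalent, per episode, to the familiar GAE recursion adv[t] = delta[t] + discount * gae_lambda * adv[t+1]. A NumPy reference sketch for a single episode (an illustrative analogue of the math, not the batched TensorFlow graph built above; it assumes a terminal value of zero):

```python
import numpy as np

def gae_advantages(rewards, baselines, discount, gae_lambda):
    """Reference GAE computation for one episode (terminal value assumed 0).

    Equivalent recursion to the discounted IIR filter described above:
        delta[t] = r[t] + discount * V(s[t+1]) - V(s[t])
        adv[t]   = delta[t] + discount * gae_lambda * adv[t+1]
    """
    baseline_shift = np.append(baselines[1:], 0.0)  # V(s[t+1]), zero after the end
    deltas = rewards + discount * baseline_shift - baselines
    advantages = np.zeros_like(deltas)
    running = 0.0
    for t in reversed(range(len(deltas))):
        running = deltas[t] + discount * gae_lambda * running
        advantages[t] = running
    return advantages

rewards = np.array([1.0, 1.0, 1.0])
baselines = np.array([0.5, 0.5, 0.5])
print(gae_advantages(rewards, baselines, discount=0.99, gae_lambda=0.95))
```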
def center_advs(advs, axes, eps, offset=0, scale=1, name='center_adv'):
"""Normalize the advs tensor.
This calculates the mean and variance using the axes specified
and normalizes the tensor using those values.
Args:
advs (tf.Tensor): Tensor to normalize.
axes (array[int]): Axes along which to compute the mean and variance.
eps (float): Small number to avoid dividing by zero.
offset (tf.Tensor): Offset added to the normalized tensor.
This is zero by default.
scale (tf.Tensor): Scale to apply to the normalized tensor. This is
1 by default but can also be None.
name (string): Name of the operation ('center_adv' by default).
Returns:
tf.Tensor: Normalized, scaled and offset tensor.
"""
with tf.name_scope(name):
mean, var = tf.nn.moments(advs, axes=axes)
advs = tf.nn.batch_normalization(advs, mean, var, offset, scale, eps)
return advs
|
Normalize the advs tensor.
This calculates the mean and variance using the axes specified
and normalizes the tensor using those values.
Args:
advs (tf.Tensor): Tensor to normalize.
axes (array[int]): Axes along which to compute the mean and variance.
eps (float): Small number to avoid dividing by zero.
offset (tf.Tensor): Offset added to the normalized tensor.
This is zero by default.
scale (tf.Tensor): Scale to apply to the normalized tensor. This is
1 by default but can also be None.
name (string): Name of the operation ('center_adv' by default).
Returns:
tf.Tensor: Normalized, scaled and offset tensor.
|
center_advs
|
python
|
rlworkgroup/garage
|
src/garage/tf/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/_functions.py
|
MIT
|
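With these arguments, `tf.nn.moments` followed by `tf.nn.batch_normalization` amounts to standardizing the advantages along the given axes, then applying the scale and offset. A NumPy analogue (illustrative only):

```python
import numpy as np

def center_advs_np(advs, eps=1e-8, offset=0.0, scale=1.0):
    """Normalize advantages to zero mean / unit variance, then scale and offset."""
    mean = advs.mean(axis=0)
    var = advs.var(axis=0)
    return scale * (advs - mean) / np.sqrt(var + eps) + offset

advs = np.array([[1.0, 2.0, 3.0], [3.0, 4.0, 5.0]])  # (episodes, steps)
normalized = center_advs_np(advs)
print(normalized.mean(axis=0))  # ~0 per column
print(normalized.std(axis=0))   # ~1 per column
```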
def positive_advs(advs, eps, name='positive_adv'):
"""Make all the values in the advs tensor positive.
Offsets all values in advs by the minimum value in
the tensor, plus an epsilon value to avoid dividing by zero.
Args:
advs (tf.Tensor): The tensor to offset.
eps (tf.float32): A small value to avoid division by zero.
name (string): Name of the operation.
Returns:
tf.Tensor: Tensor with modified (positive) values.
"""
with tf.name_scope(name):
m = tf.reduce_min(advs)
advs = (advs - m) + eps
return advs
|
Make all the values in the advs tensor positive.
Offsets all values in advs by the minimum value in
the tensor, plus an epsilon value to avoid dividing by zero.
Args:
advs (tf.Tensor): The tensor to offset.
eps (tf.float32): A small value to avoid division by zero.
name (string): Name of the operation.
Returns:
tf.Tensor: Tensor with modified (positive) values.
|
positive_advs
|
python
|
rlworkgroup/garage
|
src/garage/tf/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/_functions.py
|
MIT
|
def discounted_returns(discount, max_len, rewards, name='discounted_returns'):
"""Calculate discounted returns.
Args:
discount (float): Discount factor.
max_len (int): Maximum length of a single episode.
rewards (tf.Tensor): A 2D vector of per-step rewards with shape
:math:`(N, T)`, where :math:`N` is the batch dimension (number of
episodes) and :math:`T` is the maximum episode length experienced
by the agent.
name (string): Name of the operation ('discounted_returns' by default).
Returns:
tf.Tensor: Tensor of discounted returns.
"""
with tf.name_scope(name):
gamma = tf.constant(float(discount),
dtype=tf.float32,
shape=[max_len, 1, 1])
return_filter = tf.math.cumprod(gamma, axis=0, exclusive=True)
rewards_pad = tf.expand_dims(tf.concat(
[rewards, tf.zeros_like(rewards[:, :-1])], 1),
axis=2)
returns = tf.nn.conv1d(rewards_pad,
return_filter,
stride=1,
padding='VALID')
return returns
|
Calculate discounted returns.
Args:
discount (float): Discount factor.
max_len (int): Maximum length of a single episode.
rewards (tf.Tensor): A 2D vector of per-step rewards with shape
:math:`(N, T)`, where :math:`N` is the batch dimension (number of
episodes) and :math:`T` is the maximum episode length experienced
by the agent.
name (string): Name of the operation ('discounted_returns' by default).
Returns:
tf.Tensor: Tensor of discounted returns.
|
discounted_returns
|
python
|
rlworkgroup/garage
|
src/garage/tf/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/_functions.py
|
MIT
|
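The cumulative-product filter above computes, per episode, the standard recursion G[t] = r[t] + discount * G[t+1]. A single-episode NumPy reference (not the batched TF op):

```python
import numpy as np

def discounted_returns_np(rewards, discount):
    """Per-step discounted return: G[t] = r[t] + discount * G[t+1]."""
    returns = np.zeros_like(rewards, dtype=float)
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + discount * running
        returns[t] = running
    return returns

print(discounted_returns_np(np.array([1.0, 1.0, 1.0]), discount=0.9))
# [2.71 1.9  1.  ]
```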
def _init_opt(self):
"""Build the loss function and init the optimizer."""
with tf.name_scope(self._name):
# Create target policy and qf network
with tf.name_scope('inputs'):
obs_dim = self._env_spec.observation_space.flat_dim
input_y = tf.compat.v1.placeholder(tf.float32,
shape=(None, 1),
name='input_y')
obs = tf.compat.v1.placeholder(tf.float32,
shape=(None, obs_dim),
name='input_observation')
actions = tf.compat.v1.placeholder(
tf.float32,
shape=(None, self._env_spec.action_space.flat_dim),
name='input_action')
policy_network_outputs = self._target_policy.build(obs,
name='policy')
target_qf_outputs = self._target_qf.build(obs, actions, name='qf')
self._target_policy_f_prob_online = compile_function(
inputs=[obs], outputs=policy_network_outputs)
self._target_qf_f_prob_online = compile_function(
inputs=[obs, actions], outputs=target_qf_outputs)
# Set up target init and update function
with tf.name_scope('setup_target'):
ops = get_target_ops(self.policy.get_global_vars(),
self._target_policy.get_global_vars(),
self._tau)
policy_init_ops, policy_update_ops = ops
qf_init_ops, qf_update_ops = get_target_ops(
self._qf.get_global_vars(),
self._target_qf.get_global_vars(), self._tau)
target_init_op = policy_init_ops + qf_init_ops
target_update_op = policy_update_ops + qf_update_ops
f_init_target = compile_function(inputs=[], outputs=target_init_op)
f_update_target = compile_function(inputs=[],
outputs=target_update_op)
with tf.name_scope('inputs'):
obs_dim = self._env_spec.observation_space.flat_dim
input_y = tf.compat.v1.placeholder(tf.float32,
shape=(None, 1),
name='input_y')
obs = tf.compat.v1.placeholder(tf.float32,
shape=(None, obs_dim),
name='input_observation')
actions = tf.compat.v1.placeholder(
tf.float32,
shape=(None, self._env_spec.action_space.flat_dim),
name='input_action')
# Set up policy training function
next_action = self.policy.build(obs, name='policy_action')
next_qval = self._qf.build(obs,
next_action,
name='policy_action_qval')
with tf.name_scope('action_loss'):
action_loss = -tf.reduce_mean(next_qval)
if self._policy_weight_decay > 0.:
regularizer = tf.keras.regularizers.l2(
self._policy_weight_decay)
for var in self.policy.get_regularizable_vars():
policy_reg = regularizer(var)
action_loss += policy_reg
with tf.name_scope('minimize_action_loss'):
policy_optimizer = make_optimizer(
self._policy_optimizer,
learning_rate=self._policy_lr,
name='PolicyOptimizer')
policy_train_op = policy_optimizer.minimize(
action_loss, var_list=self.policy.get_trainable_vars())
f_train_policy = compile_function(
inputs=[obs], outputs=[policy_train_op, action_loss])
# Set up qf training function
qval = self._qf.build(obs, actions, name='q_value')
with tf.name_scope('qval_loss'):
qval_loss = tf.reduce_mean(
tf.compat.v1.squared_difference(input_y, qval))
if self._qf_weight_decay > 0.:
regularizer = tf.keras.regularizers.l2(
self._qf_weight_decay)
for var in self._qf.get_regularizable_vars():
qf_reg = regularizer(var)
qval_loss += qf_reg
with tf.name_scope('minimize_qf_loss'):
qf_optimizer = make_optimizer(self._qf_optimizer,
learning_rate=self._qf_lr,
name='QFunctionOptimizer')
qf_train_op = qf_optimizer.minimize(
qval_loss, var_list=self._qf.get_trainable_vars())
f_train_qf = compile_function(
inputs=[input_y, obs, actions],
outputs=[qf_train_op, qval_loss, qval])
self._f_train_policy = f_train_policy
self._f_train_qf = f_train_qf
self._f_init_target = f_init_target
self._f_update_target = f_update_target
|
Build the loss function and init the optimizer.
|
_init_opt
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/ddpg.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/ddpg.py
|
MIT
|
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
data = self.__dict__.copy()
del data['_target_policy_f_prob_online']
del data['_target_qf_f_prob_online']
del data['_f_train_policy']
del data['_f_train_qf']
del data['_f_init_target']
del data['_f_update_target']
return data
|
Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/ddpg.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/ddpg.py
|
MIT
|
def train(self, trainer):
"""Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer, which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
if not self._eval_env:
self._eval_env = trainer.get_env_copy()
last_returns = [float('nan')]
trainer.enable_logging = False
for _ in trainer.step_epochs():
for cycle in range(self._steps_per_epoch):
trainer.step_episode = trainer.obtain_episodes(
trainer.step_itr)
if hasattr(self.exploration_policy, 'update'):
self.exploration_policy.update(trainer.step_episode)
self._train_once(trainer.step_itr, trainer.step_episode)
if (cycle == 0 and self._replay_buffer.n_transitions_stored >=
self._min_buffer_size):
trainer.enable_logging = True
eval_episodes = obtain_evaluation_episodes(
self.policy, self._eval_env)
last_returns = log_performance(trainer.step_itr,
eval_episodes,
discount=self._discount)
trainer.step_itr += 1
return np.mean(last_returns)
|
Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer, which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
|
train
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/ddpg.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/ddpg.py
|
MIT
|
def _train_once(self, itr, episodes):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
"""
self._replay_buffer.add_episode_batch(episodes)
epoch = itr / self._steps_per_epoch
for _ in range(self._n_train_steps):
if (self._replay_buffer.n_transitions_stored >=
self._min_buffer_size):
qf_loss, y_s, qval, policy_loss = self._optimize_policy()
self._episode_policy_losses.append(policy_loss)
self._episode_qf_losses.append(qf_loss)
self._epoch_ys.append(y_s)
self._epoch_qs.append(qval)
if itr % self._steps_per_epoch == 0:
logger.log('Training finished')
if (self._replay_buffer.n_transitions_stored >=
self._min_buffer_size):
tabular.record('Epoch', epoch)
tabular.record('Policy/AveragePolicyLoss',
np.mean(self._episode_policy_losses))
tabular.record('QFunction/AverageQFunctionLoss',
np.mean(self._episode_qf_losses))
tabular.record('QFunction/AverageQ', np.mean(self._epoch_qs))
tabular.record('QFunction/MaxQ', np.max(self._epoch_qs))
tabular.record('QFunction/AverageAbsQ',
np.mean(np.abs(self._epoch_qs)))
tabular.record('QFunction/AverageY', np.mean(self._epoch_ys))
tabular.record('QFunction/MaxY', np.max(self._epoch_ys))
tabular.record('QFunction/AverageAbsY',
np.mean(np.abs(self._epoch_ys)))
|
Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
|
_train_once
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/ddpg.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/ddpg.py
|
MIT
|
def _optimize_policy(self):
"""Perform algorithm optimizing.
Returns:
float: Loss of action predicted by the policy network
float: Loss of q value predicted by the q network.
float: ys.
float: Q value predicted by the q network.
"""
timesteps = self._replay_buffer.sample_timesteps(
self._buffer_batch_size)
observations = timesteps.observations
rewards = timesteps.rewards.reshape(-1, 1)
actions = timesteps.actions
next_observations = timesteps.next_observations
terminals = timesteps.terminals.reshape(-1, 1)
rewards *= self._reward_scale
next_inputs = next_observations
inputs = observations
target_actions = self._target_policy_f_prob_online(next_inputs)
target_qvals = self._target_qf_f_prob_online(next_inputs,
target_actions)
clip_range = (-self._clip_return,
0. if self._clip_pos_returns else self._clip_return)
ys = np.clip(
rewards + (1.0 - terminals) * self._discount * target_qvals,
clip_range[0], clip_range[1])
_, qval_loss, qval = self._f_train_qf(ys, inputs, actions)
_, action_loss = self._f_train_policy(inputs)
self._f_update_target()
return qval_loss, ys, qval, action_loss
|
Perform one iteration of algorithm optimization.
Returns:
float: Loss of the Q value predicted by the Q network.
float: ys (the Q-network regression targets).
float: Q value predicted by the Q network.
float: Loss of the action predicted by the policy network.
|
_optimize_policy
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/ddpg.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/ddpg.py
|
MIT
|
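The heart of this step is forming the critic targets `ys` from a sampled batch. A NumPy sketch of that target computation with the networks stubbed out by plain arrays (the helper name and demo values are hypothetical):

```python
import numpy as np

def ddpg_critic_targets(rewards, terminals, target_qvals, discount,
                        clip_return=np.inf, clip_pos_returns=False):
    """Bellman targets y = r + (1 - done) * discount * Q'(s', pi'(s')), then clip."""
    upper = 0.0 if clip_pos_returns else clip_return
    ys = rewards + (1.0 - terminals) * discount * target_qvals
    return np.clip(ys, -clip_return, upper)

rewards = np.array([[1.0], [0.0]])
terminals = np.array([[0.0], [1.0]])       # second transition ends the episode
target_qvals = np.array([[10.0], [10.0]])  # stand-in for the target critic output
print(ddpg_critic_targets(rewards, terminals, target_qvals, discount=0.99))
# [[10.9], [0.0]] -- terminal transitions keep only the reward
```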
def _init_opt(self):
"""Initialize the networks and Ops.
Assume discrete space for dqn, so action dimension
will always be action_space.n
"""
action_dim = self._env_spec.action_space.n
# build q networks
with tf.name_scope(self._name):
action_t_ph = tf.compat.v1.placeholder(tf.int32,
None,
name='action')
reward_t_ph = tf.compat.v1.placeholder(tf.float32,
None,
name='reward')
done_t_ph = tf.compat.v1.placeholder(tf.float32, None, name='done')
with tf.name_scope('update_ops'):
target_update_op = get_target_ops(
self._qf.get_global_vars(),
self._target_qf.get_global_vars())
self._qf_update_ops = compile_function(inputs=[],
outputs=target_update_op)
with tf.name_scope('td_error'):
# Q-value of the selected action
action = tf.one_hot(action_t_ph,
action_dim,
on_value=1.,
off_value=0.)
q_selected = tf.reduce_sum(
self._qf.q_vals * action, # yapf: disable
axis=1)
# r + Q'(s', argmax_a(Q(s', _)) - Q(s, a)
if self._double_q:
target_qval_with_online_q = self._qf.build(
self._target_qf.input, self._qf.name)
future_best_q_val_action = tf.argmax(
target_qval_with_online_q, 1)
future_best_q_val = tf.reduce_sum(
self._target_qf.q_vals *
tf.one_hot(future_best_q_val_action,
action_dim,
on_value=1.,
off_value=0.),
axis=1)
else:
# r + max_a(Q'(s', _)) - Q(s, a)
future_best_q_val = tf.reduce_max(self._target_qf.q_vals,
axis=1)
q_best_masked = (1.0 - done_t_ph) * future_best_q_val
# if done, it's just reward
# else reward + discount * future_best_q_val
target_q_values = (reward_t_ph +
self._discount * q_best_masked)
# td_error = q_selected - tf.stop_gradient(target_q_values)
loss = tf.compat.v1.losses.huber_loss(
q_selected, tf.stop_gradient(target_q_values))
loss = tf.reduce_mean(loss)
with tf.name_scope('optimize_ops'):
qf_optimizer = make_optimizer(self._qf_optimizer,
learning_rate=self._qf_lr)
if self._grad_norm_clipping is not None:
gradients = qf_optimizer.compute_gradients(
loss, var_list=self._qf.get_trainable_vars())
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(
grad, self._grad_norm_clipping), var)
optimize_loss = qf_optimizer.apply_gradients(gradients)
else:
optimize_loss = qf_optimizer.minimize(
loss, var_list=self._qf.get_trainable_vars())
self._train_qf = compile_function(inputs=[
self._qf.input, action_t_ph, reward_t_ph, done_t_ph,
self._target_qf.input
],
outputs=[loss, optimize_loss])
|
Initialize the networks and Ops.
Assume discrete space for dqn, so action dimension
will always be action_space.n
|
_init_opt
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/dqn.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/dqn.py
|
MIT
|
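The `td_error` scope above builds either the double-DQN target (action chosen by the online network, scored by the target network) or the vanilla max target. A NumPy sketch of that target computation (hypothetical helper, arbitrary demo values):

```python
import numpy as np

def dqn_targets(rewards, dones, q_next_online, q_next_target, discount,
                double_q=True):
    """TD targets: r + discount * (1 - done) * Q_target(s', a*).

    With double_q, a* = argmax_a Q_online(s', a); otherwise the max is taken
    directly over Q_target(s', .).
    """
    if double_q:
        best_actions = np.argmax(q_next_online, axis=1)
        future_q = q_next_target[np.arange(len(rewards)), best_actions]
    else:
        future_q = np.max(q_next_target, axis=1)
    return rewards + discount * (1.0 - dones) * future_q

rewards = np.array([1.0, 0.0])
dones = np.array([0.0, 1.0])
q_next_online = np.array([[1.0, 5.0], [2.0, 0.0]])  # online net picks the action
q_next_target = np.array([[0.5, 3.0], [4.0, 1.0]])  # target net scores it
print(dqn_targets(rewards, dones, q_next_online, q_next_target, discount=0.99))
# [3.97 0.  ]
```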
def train(self, trainer):
"""Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer, which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
if not self._eval_env:
self._eval_env = trainer.get_env_copy()
last_returns = [float('nan')]
trainer.enable_logging = False
qf_losses = []
for _ in trainer.step_epochs():
for cycle in range(self._steps_per_epoch):
trainer.step_episode = trainer.obtain_episodes(
trainer.step_itr)
if hasattr(self.exploration_policy, 'update'):
self.exploration_policy.update(trainer.step_episode)
qf_losses.extend(
self._train_once(trainer.step_itr, trainer.step_episode))
if (cycle == 0 and self._replay_buffer.n_transitions_stored >=
self._min_buffer_size):
trainer.enable_logging = True
eval_episodes = obtain_evaluation_episodes(
self.policy, self._eval_env)
last_returns = log_performance(trainer.step_itr,
eval_episodes,
discount=self._discount)
trainer.step_itr += 1
tabular.record('DQN/QFLossMean', np.mean(qf_losses))
tabular.record('DQN/QFLossStd', np.std(qf_losses))
return np.mean(last_returns)
|
Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer, which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
|
train
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/dqn.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/dqn.py
|
MIT
|
def _train_once(self, itr, episodes):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
Returns:
list[float]: Q function losses
"""
self._replay_buffer.add_episode_batch(episodes)
qf_losses = []
for _ in range(self._n_train_steps):
if (self._replay_buffer.n_transitions_stored >=
self._min_buffer_size):
qf_losses.append(self._optimize_policy())
if self._replay_buffer.n_transitions_stored >= self._min_buffer_size:
if itr % self._target_network_update_freq == 0:
self._qf_update_ops()
return qf_losses
|
Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
Returns:
list[float]: Q function losses
|
_train_once
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/dqn.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/dqn.py
|
MIT
|
def _optimize_policy(self):
"""Optimize network using experiences from replay buffer.
Returns:
numpy.float64: Loss of policy.
"""
timesteps = self._replay_buffer.sample_timesteps(
self._buffer_batch_size)
observations = timesteps.observations
rewards = timesteps.rewards.reshape(-1, 1)
actions = self._env_spec.action_space.unflatten_n(timesteps.actions)
next_observations = timesteps.next_observations
dones = timesteps.terminals.reshape(-1, 1)
if isinstance(self._env_spec.observation_space, akro.Image):
if len(observations.shape[1:]) < len(
self._env_spec.observation_space.shape):
observations = self._env_spec.observation_space.unflatten_n(
observations)
next_observations = self._env_spec.observation_space.\
unflatten_n(next_observations)
loss, _ = self._train_qf(observations, actions, rewards, dones,
next_observations)
return loss
|
Optimize network using experiences from replay buffer.
Returns:
numpy.float64: Loss of policy.
|
_optimize_policy
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/dqn.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/dqn.py
|
MIT
|
def __getstate__(self):
"""Parameters to save in snapshot.
Returns:
dict: Parameters to save.
"""
data = self.__dict__.copy()
del data['_qf_update_ops']
del data['_train_qf']
return data
|
Parameters to save in snapshot.
Returns:
dict: Parameters to save.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/dqn.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/dqn.py
|
MIT
|
def train(self, trainer):
"""Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer, which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
last_return = None
for _ in trainer.step_epochs():
trainer.step_episode = trainer.obtain_episodes(trainer.step_itr)
last_return = self._train_once(trainer.step_itr,
trainer.step_episode)
trainer.step_itr += 1
return last_return
|
Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer, which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
|
train
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/npo.py
|
MIT
|
def _train_once(self, itr, episodes):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
Returns:
numpy.float64: Average return.
"""
# -- Stage: Calculate and pad baselines
obs = [
self._baseline.predict({'observations': obs})
for obs in episodes.observations_list
]
baselines = pad_batch_array(np.concatenate(obs), episodes.lengths,
self.max_episode_length)
# -- Stage: Run and calculate performance of the algorithm
undiscounted_returns = log_performance(itr,
episodes,
discount=self._discount)
self._episode_reward_mean.extend(undiscounted_returns)
tabular.record('Extras/EpisodeRewardMean',
np.mean(self._episode_reward_mean))
logger.log('Optimizing policy...')
self._optimize_policy(episodes, baselines)
return np.mean(undiscounted_returns)
|
Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
Returns:
numpy.float64: Average return.
|
_train_once
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/npo.py
|
MIT
|
def _optimize_policy(self, episodes, baselines):
"""Optimize policy.
Args:
episodes (EpisodeBatch): Batch of episodes.
baselines (np.ndarray): Baseline predictions.
"""
policy_opt_input_values = self._policy_opt_input_values(
episodes, baselines)
logger.log('Computing loss before')
loss_before = self._optimizer.loss(policy_opt_input_values)
logger.log('Computing KL before')
policy_kl_before = self._f_policy_kl(*policy_opt_input_values)
logger.log('Optimizing')
self._optimizer.optimize(policy_opt_input_values)
logger.log('Computing KL after')
policy_kl = self._f_policy_kl(*policy_opt_input_values)
logger.log('Computing loss after')
loss_after = self._optimizer.loss(policy_opt_input_values)
tabular.record('{}/LossBefore'.format(self.policy.name), loss_before)
tabular.record('{}/LossAfter'.format(self.policy.name), loss_after)
tabular.record('{}/dLoss'.format(self.policy.name),
loss_before - loss_after)
tabular.record('{}/KLBefore'.format(self.policy.name),
policy_kl_before)
tabular.record('{}/KL'.format(self.policy.name), policy_kl)
pol_ent = self._f_policy_entropy(*policy_opt_input_values)
ent = np.sum(pol_ent) / np.sum(episodes.lengths)
tabular.record('{}/Entropy'.format(self.policy.name), ent)
tabular.record('{}/Perplexity'.format(self.policy.name), np.exp(ent))
returns = self._fit_baseline_with_data(episodes, baselines)
ev = explained_variance_1d(baselines, returns, episodes.valids)
tabular.record('{}/ExplainedVariance'.format(self._baseline.name), ev)
self._old_policy.parameters = self.policy.parameters
|
Optimize policy.
Args:
episodes (EpisodeBatch): Batch of episodes.
baselines (np.ndarray): Baseline predictions.
|
_optimize_policy
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/npo.py
|
MIT
|
def _build_inputs(self):
"""Build input variables.
Returns:
namedtuple: Collection of variables to compute policy loss.
namedtuple: Collection of variables to do policy optimization.
"""
observation_space = self.policy.observation_space
action_space = self.policy.action_space
with tf.name_scope('inputs'):
obs_var = observation_space.to_tf_placeholder(name='obs',
batch_dims=2)
action_var = action_space.to_tf_placeholder(name='action',
batch_dims=2)
reward_var = tf.compat.v1.placeholder(tf.float32,
shape=[None, None],
name='reward')
valid_var = tf.compat.v1.placeholder(tf.float32,
shape=[None, None],
name='valid')
baseline_var = tf.compat.v1.placeholder(tf.float32,
shape=[None, None],
name='baseline')
policy_state_info_vars = {
k: tf.compat.v1.placeholder(tf.float32,
shape=[None] * 2 + list(shape),
name=k)
for k, shape in self.policy.state_info_specs
}
policy_state_info_vars_list = [
policy_state_info_vars[k] for k in self.policy.state_info_keys
]
augmented_obs_var = obs_var
for k in self.policy.state_info_keys:
extra_state_var = policy_state_info_vars[k]
extra_state_var = tf.cast(extra_state_var, tf.float32)
augmented_obs_var = tf.concat([augmented_obs_var, extra_state_var],
-1)
self._policy_network = self.policy.build(augmented_obs_var,
name='policy')
self._old_policy_network = self._old_policy.build(augmented_obs_var,
name='policy')
policy_loss_inputs = graph_inputs(
'PolicyLossInputs',
action_var=action_var,
reward_var=reward_var,
baseline_var=baseline_var,
valid_var=valid_var,
policy_state_info_vars=policy_state_info_vars,
)
policy_opt_inputs = graph_inputs(
'PolicyOptInputs',
obs_var=obs_var,
action_var=action_var,
reward_var=reward_var,
baseline_var=baseline_var,
valid_var=valid_var,
policy_state_info_vars_list=policy_state_info_vars_list,
)
return policy_loss_inputs, policy_opt_inputs
|
Build input variables.
Returns:
namedtuple: Collection of variables to compute policy loss.
namedtuple: Collection of variables to do policy optimization.
|
_build_inputs
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/npo.py
|
MIT
|
def _build_policy_loss(self, i):
"""Build policy loss and other output tensors.
Args:
i (namedtuple): Collection of variables to compute policy loss.
Returns:
tf.Tensor: Policy loss.
tf.Tensor: Mean policy KL divergence.
"""
policy_entropy = self._build_entropy_term(i)
rewards = i.reward_var
if self._maximum_entropy:
with tf.name_scope('augmented_rewards'):
rewards = i.reward_var + (self._policy_ent_coeff *
policy_entropy)
with tf.name_scope('policy_loss'):
adv = compute_advantages(self._discount,
self._gae_lambda,
self.max_episode_length,
i.baseline_var,
rewards,
name='adv')
adv = tf.reshape(adv, [-1, self.max_episode_length])
# Optionally normalize advantages
eps = tf.constant(1e-8, dtype=tf.float32)
if self._center_adv:
adv = center_advs(adv, axes=[0], eps=eps)
if self._positive_adv:
adv = positive_advs(adv, eps)
old_policy_dist = self._old_policy_network.dist
policy_dist = self._policy_network.dist
with tf.name_scope('kl'):
kl = old_policy_dist.kl_divergence(policy_dist)
pol_mean_kl = tf.reduce_mean(kl)
# Calculate vanilla loss
with tf.name_scope('vanilla_loss'):
ll = policy_dist.log_prob(i.action_var, name='log_likelihood')
vanilla = ll * adv
# Calculate surrogate loss
with tf.name_scope('surrogate_loss'):
lr = tf.exp(ll - old_policy_dist.log_prob(i.action_var))
surrogate = lr * adv
# Finalize objective function
with tf.name_scope('loss'):
if self._pg_loss == 'vanilla':
# VPG uses the vanilla objective
obj = tf.identity(vanilla, name='vanilla_obj')
elif self._pg_loss == 'surrogate':
# TRPO uses the standard surrogate objective
obj = tf.identity(surrogate, name='surr_obj')
elif self._pg_loss == 'surrogate_clip':
lr_clip = tf.clip_by_value(lr,
1 - self._lr_clip_range,
1 + self._lr_clip_range,
name='lr_clip')
surr_clip = lr_clip * adv
obj = tf.minimum(surrogate, surr_clip, name='surr_obj')
if self._entropy_regularzied:
obj += self._policy_ent_coeff * policy_entropy
# filter only the valid values
obj = tf.boolean_mask(obj, i.valid_var)
# Maximize E[surrogate objective] by minimizing
# -E_t[surrogate objective]
loss = -tf.reduce_mean(obj)
# Diagnostic functions
self._f_policy_kl = tf.compat.v1.get_default_session(
).make_callable(pol_mean_kl,
feed_list=flatten_inputs(self._policy_opt_inputs))
self._f_rewards = tf.compat.v1.get_default_session().make_callable(
rewards, feed_list=flatten_inputs(self._policy_opt_inputs))
returns = discounted_returns(self._discount,
self.max_episode_length, rewards)
self._f_returns = tf.compat.v1.get_default_session().make_callable(
returns, feed_list=flatten_inputs(self._policy_opt_inputs))
return loss, pol_mean_kl
|
Build policy loss and other output tensors.
Args:
i (namedtuple): Collection of variables to compute policy loss.
Returns:
tf.Tensor: Policy loss.
tf.Tensor: Mean policy KL divergence.
|
_build_policy_loss
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/npo.py
|
MIT
|
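The three objectives selected by `self._pg_loss` above ('vanilla', 'surrogate', 'surrogate_clip') can be written compactly in NumPy for a batch of valid time steps; this is an illustrative sketch, not the graph code:

```python
import numpy as np

def policy_objective(log_probs, old_log_probs, advantages,
                     pg_loss='surrogate_clip', lr_clip_range=0.2):
    """Per-step policy-gradient objective; the loss minimized is -mean(objective)."""
    ratio = np.exp(log_probs - old_log_probs)          # likelihood ratio
    if pg_loss == 'vanilla':
        obj = log_probs * advantages                   # REINFORCE-style objective
    elif pg_loss == 'surrogate':
        obj = ratio * advantages                       # TRPO surrogate
    elif pg_loss == 'surrogate_clip':                  # PPO-style clipped surrogate
        clipped = np.clip(ratio, 1 - lr_clip_range, 1 + lr_clip_range)
        obj = np.minimum(ratio * advantages, clipped * advantages)
    else:
        raise ValueError('Unknown pg_loss')
    return obj

log_probs = np.array([-0.5, -1.0])
old_log_probs = np.array([-0.7, -0.8])
advantages = np.array([1.0, -2.0])
obj = policy_objective(log_probs, old_log_probs, advantages)
loss = -obj.mean()  # what the optimizer minimizes
print(obj, loss)
```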
def _build_entropy_term(self, i):
"""Build policy entropy tensor.
Args:
i (namedtuple): Collection of variables to compute policy loss.
Returns:
tf.Tensor: Policy entropy.
"""
pol_dist = self._policy_network.dist
with tf.name_scope('policy_entropy'):
if self._use_neg_logli_entropy:
policy_entropy = -pol_dist.log_prob(i.action_var,
name='policy_log_likeli')
else:
policy_entropy = pol_dist.entropy()
# This prevents entropy from becoming negative for small policy std
if self._use_softplus_entropy:
policy_entropy = tf.nn.softplus(policy_entropy)
if self._stop_entropy_gradient:
policy_entropy = tf.stop_gradient(policy_entropy)
# dense form, match the shape of advantage
policy_entropy = tf.reshape(policy_entropy,
[-1, self.max_episode_length])
self._f_policy_entropy = compile_function(
flatten_inputs(self._policy_opt_inputs), policy_entropy)
return policy_entropy
|
Build policy entropy tensor.
Args:
i (namedtuple): Collection of variables to compute policy loss.
Returns:
tf.Tensor: Policy entropy.
|
_build_entropy_term
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/npo.py
|
MIT
|
def _fit_baseline_with_data(self, episodes, baselines):
"""Update baselines from samples.
Args:
episodes (EpisodeBatch): Batch of episodes.
baselines (np.ndarray): Baseline predictions.
Returns:
np.ndarray: Augmented returns.
"""
policy_opt_input_values = self._policy_opt_input_values(
episodes, baselines)
returns_tensor = self._f_returns(*policy_opt_input_values)
returns_tensor = np.squeeze(returns_tensor, -1)
paths = []
valids = episodes.valids
observations = episodes.padded_observations
# Compute returns
for ret, val, ob in zip(returns_tensor, valids, observations):
returns = ret[val.astype(np.bool)]
obs = ob[val.astype(np.bool)]
paths.append(dict(observations=obs, returns=returns))
# Fit baseline
logger.log('Fitting baseline...')
self._baseline.fit(paths)
return returns_tensor
|
Update baselines from samples.
Args:
episodes (EpisodeBatch): Batch of episodes.
baselines (np.ndarray): Baseline predictions.
Returns:
np.ndarray: Augmented returns.
|
_fit_baseline_with_data
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/npo.py
|
MIT
|
def _policy_opt_input_values(self, episodes, baselines):
"""Map episode samples to the policy optimizer inputs.
Args:
episodes (EpisodeBatch): Batch of episodes.
baselines (np.ndarray): Baseline predictions.
Returns:
list(np.ndarray): Flattened policy optimization input values.
"""
agent_infos = episodes.padded_agent_infos
policy_state_info_list = [
agent_infos[k] for k in self.policy.state_info_keys
]
actions = [
self._env_spec.action_space.flatten_n(act)
for act in episodes.actions_list
]
padded_actions = pad_batch_array(np.concatenate(actions),
episodes.lengths,
self.max_episode_length)
# pylint: disable=unexpected-keyword-arg
policy_opt_input_values = self._policy_opt_inputs._replace(
obs_var=episodes.padded_observations,
action_var=padded_actions,
reward_var=episodes.padded_rewards,
baseline_var=baselines,
valid_var=episodes.valids,
policy_state_info_vars_list=policy_state_info_list,
)
return flatten_inputs(policy_opt_input_values)
|
Map episode samples to the policy optimizer inputs.
Args:
episodes (EpisodeBatch): Batch of episodes.
baselines (np.ndarray): Baseline predictions.
Returns:
list(np.ndarray): Flattened policy optimization input values.
|
_policy_opt_input_values
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/npo.py
|
MIT
|
def _check_entropy_configuration(self, entropy_method, center_adv,
stop_entropy_gradient,
use_neg_logli_entropy, policy_ent_coeff):
"""Check entropy configuration.
Args:
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized'
adds the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
use_neg_logli_entropy (bool): Whether to estimate the entropy as
the negative log likelihood of the action.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
Raises:
ValueError: If center_adv is True when entropy_method is max.
ValueError: If stop_gradient is False when entropy_method is max.
ValueError: If policy_ent_coeff is non-zero when there is
no entropy method.
ValueError: If entropy_method is not one of 'max', 'regularized',
'no_entropy'.
"""
del use_neg_logli_entropy
if entropy_method == 'max':
if center_adv:
raise ValueError('center_adv should be False when '
'entropy_method is max')
if not stop_entropy_gradient:
raise ValueError('stop_gradient should be True when '
'entropy_method is max')
self._maximum_entropy = True
self._entropy_regularzied = False
elif entropy_method == 'regularized':
self._maximum_entropy = False
self._entropy_regularzied = True
elif entropy_method == 'no_entropy':
if policy_ent_coeff != 0.0:
raise ValueError('policy_ent_coeff should be zero '
'when there is no entropy method')
self._maximum_entropy = False
self._entropy_regularzied = False
else:
raise ValueError('Invalid entropy_method')
|
Check entropy configuration.
Args:
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized'
adds the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
use_neg_logli_entropy (bool): Whether to estimate the entropy as
the negative log likelihood of the action.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
Raises:
ValueError: If center_adv is True when entropy_method is max.
ValueError: If stop_gradient is False when entropy_method is max.
ValueError: If policy_ent_coeff is non-zero when there is
no entropy method.
ValueError: If entropy_method is not one of 'max', 'regularized',
'no_entropy'.
|
_check_entropy_configuration
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/npo.py
|
MIT
|
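The checks above only admit three configurations. As a quick illustration (a sketch summarizing the checks, not garage code), the accepted combinations look like this:
# Allowed combinations implied by _check_entropy_configuration:
#   'max'         -> center_adv must be False, stop_entropy_gradient must be True
#   'regularized' -> no extra constraint checked here
#   'no_entropy'  -> policy_ent_coeff must be 0.0
valid_examples = [
    dict(entropy_method='max', center_adv=False,
         stop_entropy_gradient=True, policy_ent_coeff=0.01),
    dict(entropy_method='regularized', center_adv=True,
         stop_entropy_gradient=False, policy_ent_coeff=0.01),
    dict(entropy_method='no_entropy', center_adv=True,
         stop_entropy_gradient=False, policy_ent_coeff=0.0),
]
|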
def __getstate__(self):
"""Parameters to save in snapshot.
Returns:
dict: Parameters to save.
"""
data = self.__dict__.copy()
del data['_name_scope']
del data['_policy_opt_inputs']
del data['_f_policy_entropy']
del data['_f_policy_kl']
del data['_f_rewards']
del data['_f_returns']
del data['_policy_network']
del data['_old_policy_network']
return data
|
Parameters to save in snapshot.
Returns:
dict: Parameters to save.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/npo.py
|
MIT
|
def __setstate__(self, state):
"""Parameters to restore from snapshot.
Args:
state (dict): Parameters to restore from.
"""
self.__dict__ = state
self._name_scope = tf.name_scope(self._name)
self._init_opt()
|
Parameters to restore from snapshot.
Args:
state (dict): Parameters to restore from.
|
__setstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/npo.py
|
MIT
|
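The __getstate__/__setstate__ pair above implements the usual snapshotting pattern: drop members that cannot (or should not) be pickled, then rebuild them after unpickling. A toy, self-contained sketch of the same pattern (not garage code; the cache here stands in for the TF graph objects rebuilt by _init_opt):
import pickle

class Snapshottable:
    def __init__(self, value):
        self.value = value
        self._cache = self._build_cache()    # stand-in for compiled TF functions

    def _build_cache(self):
        return {'doubled': self.value * 2}

    def __getstate__(self):
        state = self.__dict__.copy()
        del state['_cache']                  # not pickled, rebuilt on restore
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        self._cache = self._build_cache()    # rebuild, like _init_opt() above

restored = pickle.loads(pickle.dumps(Snapshottable(21)))
assert restored._cache == {'doubled': 42}
|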
def train(self, trainer):
"""Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer, which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
last_return = None
for _ in trainer.step_epochs():
trainer.step_episode = trainer.obtain_episodes(trainer.step_itr)
last_return = self._train_once(trainer.step_itr,
trainer.step_episode)
trainer.step_itr += 1
return last_return
|
Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer, which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
|
train
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/reps.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/reps.py
|
MIT
|
def _train_once(self, itr, episodes):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
Returns:
numpy.float64: Average return.
"""
# -- Stage: Run and calculate performance of the algorithm
undiscounted_returns = log_performance(
itr,
episodes,
discount=self._discount)
self._episode_reward_mean.extend(undiscounted_returns)
tabular.record('Extras/EpisodeRewardMean',
np.mean(self._episode_reward_mean))
average_return = np.mean(undiscounted_returns)
logger.log('Optimizing policy...')
self._optimize_policy(episodes)
return average_return
|
Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
Returns:
numpy.float64: Average return.
|
_train_once
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/reps.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/reps.py
|
MIT
|
def __getstate__(self):
"""Parameters to save in snapshot.
Returns:
dict: Parameters to save.
"""
data = self.__dict__.copy()
del data['_name_scope']
del data['_policy_opt_inputs']
del data['_dual_opt_inputs']
del data['_f_dual']
del data['_f_dual_grad']
del data['_f_policy_kl']
del data['_policy_network']
del data['_old_policy_network']
return data
|
Parameters to save in snapshot.
Returns:
dict: Parameters to save.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/reps.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/reps.py
|
MIT
|
def __setstate__(self, state):
"""Parameters to restore from snapshot.
Args:
state (dict): Parameters to restore from.
"""
self.__dict__ = state
self._name_scope = tf.name_scope(self._name)
self._init_opt()
|
Parameters to restore from snapshot.
Args:
state (dict): Parameters to restore from.
|
__setstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/reps.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/reps.py
|
MIT
|
def _optimize_policy(self, episodes):
"""Optimize the policy using the samples.
Args:
episodes (EpisodeBatch): Batch of episodes.
"""
# Initial BFGS parameter values.
x0 = np.hstack([self._param_eta, self._param_v])
# Set parameter boundaries: \eta>=1e-12, v unrestricted.
bounds = [(-np.inf, np.inf) for _ in x0]
bounds[0] = (1e-12, np.inf)
# Optimize dual
eta_before = self._param_eta
logger.log('Computing dual before')
self._feat_diff = self._features(episodes)
dual_opt_input_values = self._dual_opt_input_values(episodes)
dual_before = self._f_dual(*dual_opt_input_values)
logger.log('Optimizing dual')
def eval_dual(x):
"""Evaluate dual function loss.
Args:
x (numpy.ndarray): Input to dual function.
Returns:
numpy.float64: Dual function loss.
"""
self._param_eta = x[0]
self._param_v = x[1:]
dual_opt_input_values = self._dual_opt_input_values(episodes)
return self._f_dual(*dual_opt_input_values)
def eval_dual_grad(x):
"""Evaluate gradient of dual function loss.
Args:
x (numpy.ndarray): Input to dual function.
Returns:
numpy.ndarray: Gradient of dual function loss.
"""
self._param_eta = x[0]
self._param_v = x[1:]
dual_opt_input_values = self._dual_opt_input_values(episodes)
grad = self._f_dual_grad(*dual_opt_input_values)
eta_grad = float(grad[0])
v_grad = grad[1]
return np.hstack([eta_grad, v_grad])
params_ast, _, _ = self._dual_optimizer(func=eval_dual,
x0=x0,
fprime=eval_dual_grad,
bounds=bounds,
**self._dual_optimizer_args)
logger.log('Computing dual after')
self._param_eta, self._param_v = params_ast[0], params_ast[1:]
dual_opt_input_values = self._dual_opt_input_values(episodes)
dual_after = self._f_dual(*dual_opt_input_values)
# Optimize policy
policy_opt_input_values = self._policy_opt_input_values(episodes)
logger.log('Computing policy loss before')
loss_before = self._optimizer.loss(policy_opt_input_values)
logger.log('Computing policy KL before')
policy_kl_before = self._f_policy_kl(*policy_opt_input_values)
logger.log('Optimizing policy')
self._optimizer.optimize(policy_opt_input_values)
logger.log('Computing policy KL')
policy_kl = self._f_policy_kl(*policy_opt_input_values)
logger.log('Computing policy loss after')
loss_after = self._optimizer.loss(policy_opt_input_values)
tabular.record('EtaBefore', eta_before)
tabular.record('EtaAfter', self._param_eta)
tabular.record('DualBefore', dual_before)
tabular.record('DualAfter', dual_after)
tabular.record('{}/LossBefore'.format(self.policy.name), loss_before)
tabular.record('{}/LossAfter'.format(self.policy.name), loss_after)
tabular.record('{}/dLoss'.format(self.policy.name),
loss_before - loss_after)
tabular.record('{}/KLBefore'.format(self.policy.name),
policy_kl_before)
tabular.record('{}/KL'.format(self.policy.name), policy_kl)
self._old_policy.parameters = self.policy.parameters
|
Optimize the policy using the samples.
Args:
episodes (EpisodeBatch): Batch of episodes.
|
_optimize_policy
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/reps.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/reps.py
|
MIT
|
def eval_dual(x):
"""Evaluate dual function loss.
Args:
x (numpy.ndarray): Input to dual function.
Returns:
numpy.float64: Dual function loss.
"""
self._param_eta = x[0]
self._param_v = x[1:]
dual_opt_input_values = self._dual_opt_input_values(episodes)
return self._f_dual(*dual_opt_input_values)
|
Evaluate dual function loss.
Args:
x (numpy.ndarray): Input to dual function.
Returns:
numpy.float64: Dual function loss.
|
eval_dual
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/reps.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/reps.py
|
MIT
|
def eval_dual_grad(x):
"""Evaluate gradient of dual function loss.
Args:
x (numpy.ndarray): Input to dual function.
Returns:
numpy.ndarray: Gradient of dual function loss.
"""
self._param_eta = x[0]
self._param_v = x[1:]
dual_opt_input_values = self._dual_opt_input_values(episodes)
grad = self._f_dual_grad(*dual_opt_input_values)
eta_grad = float(grad[0])
v_grad = grad[1]
return np.hstack([eta_grad, v_grad])
|
Evaluate gradient of dual function loss.
Args:
x (numpy.ndarray): Input to dual function.
Returns:
numpy.ndarray: Gradient of dual function loss.
|
eval_dual_grad
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/reps.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/reps.py
|
MIT
|
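The eval_dual and eval_dual_grad closures above are passed to the dual optimizer as func and fprime, and the call site unpacks three return values with eta bounded below by 1e-12, which matches the interface of scipy.optimize.fmin_l_bfgs_b. A minimal, self-contained example of that interface on a toy quadratic (the objective here is illustrative only, not the REPS dual):
import numpy as np
from scipy.optimize import fmin_l_bfgs_b

def dual(x):
    # Toy stand-in for the dual objective: smooth and convex in x.
    return float(np.sum((x - 1.0) ** 2))

def dual_grad(x):
    return 2.0 * (x - 1.0)

x0 = np.array([15.0, 0.0, 0.0])                      # [eta, v...] style layout
bounds = [(1e-12, np.inf)] + [(-np.inf, np.inf)] * (len(x0) - 1)
x_opt, f_opt, info = fmin_l_bfgs_b(func=dual, x0=x0, fprime=dual_grad,
                                   bounds=bounds)
assert np.allclose(x_opt, 1.0, atol=1e-5)
|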
def _build_inputs(self):
"""Build input variables.
Returns:
namedtuple: Collection of variables to compute policy loss.
namedtuple: Collection of variables to do policy optimization.
namedtuple: Collection of variables to do dual optimization.
"""
observation_space = self.policy.observation_space
action_space = self.policy.action_space
with tf.name_scope('inputs'):
obs_var = observation_space.to_tf_placeholder(
name='obs',
batch_dims=2)
action_var = action_space.to_tf_placeholder(
name='action',
batch_dims=2)
reward_var = new_tensor(
name='reward',
ndim=2,
dtype=tf.float32)
valid_var = new_tensor(
name='valid',
ndim=2,
dtype=tf.float32)
feat_diff = new_tensor(
name='feat_diff',
ndim=2,
dtype=tf.float32)
param_v = new_tensor(
name='param_v',
ndim=1,
dtype=tf.float32)
param_eta = new_tensor(
name='param_eta',
ndim=0,
dtype=tf.float32)
policy_state_info_vars = {
k: tf.compat.v1.placeholder(
tf.float32,
shape=[None] * 2 + list(shape),
name=k)
for k, shape in self.policy.state_info_specs
}
policy_state_info_vars_list = [
policy_state_info_vars[k]
for k in self.policy.state_info_keys
]
self._policy_network = self.policy.build(obs_var, name='policy')
self._old_policy_network = self._old_policy.build(obs_var,
name='policy')
policy_loss_inputs = graph_inputs(
'PolicyLossInputs',
obs_var=obs_var,
action_var=action_var,
reward_var=reward_var,
valid_var=valid_var,
feat_diff=feat_diff,
param_eta=param_eta,
param_v=param_v,
policy_state_info_vars=policy_state_info_vars,
)
policy_opt_inputs = graph_inputs(
'PolicyOptInputs',
obs_var=obs_var,
action_var=action_var,
reward_var=reward_var,
valid_var=valid_var,
feat_diff=feat_diff,
param_eta=param_eta,
param_v=param_v,
policy_state_info_vars_list=policy_state_info_vars_list,
)
dual_opt_inputs = graph_inputs(
'DualOptInputs',
reward_var=reward_var,
valid_var=valid_var,
feat_diff=feat_diff,
param_eta=param_eta,
param_v=param_v,
policy_state_info_vars_list=policy_state_info_vars_list,
)
return policy_loss_inputs, policy_opt_inputs, dual_opt_inputs
|
Build input variables.
Returns:
namedtuple: Collection of variables to compute policy loss.
namedtuple: Collection of variables to do policy optimization.
namedtuple: Collection of variables to do dual optimization.
|
_build_inputs
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/reps.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/reps.py
|
MIT
|
def _build_policy_loss(self, i):
"""Build policy loss and other output tensors.
Args:
i (namedtuple): Collection of variables to compute policy loss.
Returns:
tf.Tensor: Policy loss.
"""
pol_dist = self._policy_network.dist
old_pol_dist = self._old_policy_network.dist
# Initialize dual params
self._param_eta = 15.
self._param_v = np.random.rand(
self._env_spec.observation_space.flat_dim * 2 + 4)
with tf.name_scope('bellman_error'):
delta_v = tf.boolean_mask(i.reward_var,
i.valid_var) + tf.tensordot(
i.feat_diff, i.param_v, 1)
with tf.name_scope('policy_loss'):
ll = pol_dist.log_prob(i.action_var)
ll = tf.boolean_mask(ll, i.valid_var)
loss = -tf.reduce_mean(
ll * tf.exp(delta_v / i.param_eta -
tf.reduce_max(delta_v / i.param_eta)))
reg_params = self.policy.get_regularizable_vars()
loss += self._l2_reg_loss * tf.reduce_sum(
[tf.reduce_mean(tf.square(param))
for param in reg_params]) / len(reg_params)
with tf.name_scope('kl'):
kl = old_pol_dist.kl_divergence(pol_dist)
pol_mean_kl = tf.reduce_mean(kl)
with tf.name_scope('dual'):
dual_loss = i.param_eta * self._epsilon + (
i.param_eta * tf.math.log(
tf.reduce_mean(
tf.exp(delta_v / i.param_eta -
tf.reduce_max(delta_v / i.param_eta)))) +
i.param_eta * tf.reduce_max(delta_v / i.param_eta))
dual_loss += self._l2_reg_dual * (tf.square(i.param_eta) +
tf.square(1 / i.param_eta))
dual_grad = tf.gradients(dual_loss, [i.param_eta, i.param_v])
self._f_dual = compile_function(
flatten_inputs(self._dual_opt_inputs),
dual_loss)
self._f_dual_grad = compile_function(
flatten_inputs(self._dual_opt_inputs),
dual_grad)
self._f_policy_kl = compile_function(
flatten_inputs(self._policy_opt_inputs),
pol_mean_kl)
return loss
|
Build policy loss and other output tensors.
Args:
i (namedtuple): Collection of variables to compute policy loss.
Returns:
tf.Tensor: Policy loss.
|
_build_policy_loss
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/reps.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/reps.py
|
MIT
|
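The policy loss built above is a weighted maximum-likelihood objective: each valid step's log-likelihood is weighted by exp(delta_v / eta), with the maximum subtracted inside the exponent purely for numerical stability. A small NumPy sketch of that weighting (all values are made up):
import numpy as np

log_likelihoods = np.array([-0.5, -1.2, -0.3, -0.9])  # log pi(a_t|s_t) at valid steps
delta_v = np.array([0.2, -0.1, 0.4, 0.0])             # Bellman errors at those steps
eta = 15.0

z = delta_v / eta
weights = np.exp(z - np.max(z))                       # max-subtraction avoids overflow
loss = -np.mean(log_likelihoods * weights)

# Subtracting the max rescales every weight by the same constant, so the
# relative weighting of samples (and hence the minimizer) is unchanged.
print(loss)
|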
def _dual_opt_input_values(self, episodes):
"""Update dual func optimize input values based on samples data.
Args:
episodes (EpisodeBatch): Batch of episodes.
Returns:
list(np.ndarray): Flattened dual function optimization input values.
"""
agent_infos = episodes.padded_agent_infos
policy_state_info_list = [
agent_infos[k] for k in self.policy.state_info_keys
]
# pylint: disable=unexpected-keyword-arg
dual_opt_input_values = self._dual_opt_inputs._replace(
reward_var=episodes.padded_rewards,
valid_var=episodes.valids,
feat_diff=self._feat_diff,
param_eta=self._param_eta,
param_v=self._param_v,
policy_state_info_vars_list=policy_state_info_list,
)
return flatten_inputs(dual_opt_input_values)
|
Update dual function optimization input values based on sample data.
Args:
episodes (EpisodeBatch): Batch of episodes.
Returns:
list(np.ndarray): Flattened dual function optimization input values.
|
_dual_opt_input_values
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/reps.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/reps.py
|
MIT
|
def _policy_opt_input_values(self, episodes):
"""Update policy optimize input values based on samples data.
Args:
episodes (EpisodeBatch): Batch of episodes.
Returns:
list(np.ndarray): Flattened policy optimization input values.
"""
agent_infos = episodes.padded_agent_infos
policy_state_info_list = [
agent_infos[k] for k in self.policy.state_info_keys
]
actions = [
self._env_spec.action_space.flatten_n(act)
for act in episodes.actions_list
]
padded_actions = pad_batch_array(np.concatenate(actions),
episodes.lengths,
self.max_episode_length)
# pylint: disable=unexpected-keyword-arg
policy_opt_input_values = self._policy_opt_inputs._replace(
obs_var=episodes.padded_observations,
action_var=padded_actions,
reward_var=episodes.padded_rewards,
valid_var=episodes.valids,
feat_diff=self._feat_diff,
param_eta=self._param_eta,
param_v=self._param_v,
policy_state_info_vars_list=policy_state_info_list,
)
return flatten_inputs(policy_opt_input_values)
|
Update policy optimization input values based on sample data.
Args:
episodes (EpisodeBatch): Batch of episodes.
Returns:
list(np.ndarray): Flattened policy optimization input values.
|
_policy_opt_input_values
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/reps.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/reps.py
|
MIT
|
def _features(self, episodes):
"""Get valid view features based on samples data.
Args:
episodes (EpisodeBatch): Batch of episodes.
Returns:
numpy.ndarray: Features for training.
"""
start = 0
feat_diff = []
for length in episodes.lengths:
stop = start + length
o = np.clip(episodes.observations[start:stop],
self._env_spec.observation_space.low,
self._env_spec.observation_space.high)
lr = length
al = np.arange(lr).reshape(-1, 1) / self.max_episode_length
feats = np.concatenate(
[o, o**2, al, al**2, al**3,
np.ones((lr, 1))], axis=1)
# pylint: disable=unsubscriptable-object
feats = np.vstack([feats, np.zeros(feats.shape[1])])
feat_diff.append(feats[1:] - feats[:-1])
start = stop
return np.vstack(feat_diff)
|
Get valid view features based on sample data.
Args:
episodes (EpisodeBatch): Batch of episodes.
Returns:
numpy.ndarray: Features for training.
|
_features
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/reps.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/reps.py
|
MIT
|
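The feature map built above for each episode is [o, o**2, t, t**2, t**3, 1] with t the normalized time index, followed by an appended zero row so that the first differences include a terminal transition. A toy NumPy sketch mirroring that construction (the observations are made up; only the shapes matter):
import numpy as np

obs = np.array([[0.1, -0.2],
                [0.3, 0.0],
                [0.2, 0.5]])            # one episode, length 3, obs_dim 2
max_episode_length = 10

length = len(obs)
al = np.arange(length).reshape(-1, 1) / max_episode_length
feats = np.concatenate([obs, obs**2, al, al**2, al**3,
                        np.ones((length, 1))], axis=1)
feats = np.vstack([feats, np.zeros(feats.shape[1])])   # terminal zero row
feat_diff = feats[1:] - feats[:-1]

assert feats.shape == (length + 1, 2 * obs.shape[1] + 4)
assert feat_diff.shape == (length, 2 * obs.shape[1] + 4)
|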
def step(self, action):
"""Call step on wrapped env.
Args:
action (np.ndarray): An action provided by the agent.
Returns:
EnvStep: The environment step resulting from the action.
Raises:
RuntimeError: if `step()` is called after the environment has been
constructed and `reset()` has not been called.
"""
es = self._env.step(action)
next_obs = es.observation
next_obs = np.concatenate([
next_obs, action, [es.reward], [es.step_type == StepType.TERMINAL]
])
return EnvStep(env_spec=self.spec,
action=action,
reward=es.reward,
observation=next_obs,
env_info=es.env_info,
step_type=es.step_type)
|
Call step on wrapped env.
Args:
action (np.ndarray): An action provided by the agent.
Returns:
EnvStep: The environment step resulting from the action.
Raises:
RuntimeError: if `step()` is called after the environment has been
constructed and `reset()` has not been called.
|
step
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/rl2.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/rl2.py
|
MIT
|
def _create_rl2_obs_space(self):
"""Create observation space for RL2.
Returns:
akro.Box: Augmented observation space.
"""
obs_flat_dim = np.prod(self._env.observation_space.shape)
action_flat_dim = np.prod(self._env.action_space.shape)
return akro.Box(low=-np.inf,
high=np.inf,
shape=(obs_flat_dim + action_flat_dim + 1 + 1, ))
|
Create observation space for RL2.
Returns:
akro.Box: Augmented observation space.
|
_create_rl2_obs_space
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/rl2.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/rl2.py
|
MIT
|
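Together, step() and _create_rl2_obs_space() implement the RL^2 observation augmentation: the policy sees [observation, previous action, reward, done flag], so the flat dimension is obs_dim + action_dim + 2. A tiny NumPy sketch of that concatenation (the values are placeholders):
import numpy as np

obs = np.array([0.1, 0.2, 0.3])      # obs_dim = 3
action = np.array([0.5, -0.5])       # action_dim = 2
reward = 1.0
done = False

augmented = np.concatenate([obs, action, [reward], [float(done)]])
assert augmented.shape == (len(obs) + len(action) + 1 + 1,)
|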
def rollout(self):
"""Sample a single episode of the agent in the environment.
Returns:
EpisodeBatch: The collected episode.
"""
self.agent.reset()
for _ in range(self._n_episodes_per_trial):
self.start_episode()
while not self.step_episode():
pass
self._agent_infos['batch_idx'] = np.full(len(self._env_steps),
self._worker_number)
return self.collect_episode()
|
Sample a single episode of the agent in the environment.
Returns:
EpisodeBatch: The collected episode.
|
rollout
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/rl2.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/rl2.py
|
MIT
|
def set_param_values(self, params):
"""Set param values.
Args:
params (Tuple[np.ndarray, np.ndarray]): Two numpy arrays of
parameter values: one for the network parameters and one
for the initial hidden state.
"""
inner_params, hiddens = params
self._policy.set_param_values(inner_params)
self._initial_hiddens = hiddens
|
Set param values.
Args:
params (Tuple[np.ndarray, np.ndarray]): Two numpy arrays of
parameter values: one for the network parameters and one
for the initial hidden state.
|
set_param_values
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/rl2.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/rl2.py
|
MIT
|
def train(self, trainer):
"""Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer, which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch.
"""
last_return = None
for _ in trainer.step_epochs():
if trainer.step_itr % self._n_epochs_per_eval == 0:
if self._meta_evaluator is not None:
self._meta_evaluator.evaluate(self)
trainer.step_episode = trainer.obtain_episodes(
trainer.step_itr,
env_update=self._task_sampler.sample(self._meta_batch_size))
last_return = self.train_once(trainer.step_itr,
trainer.step_episode)
trainer.step_itr += 1
return last_return
|
Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer, which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch.
|
train
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/rl2.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/rl2.py
|
MIT
|
def train_once(self, itr, episodes):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
Returns:
numpy.float64: Average return.
"""
episodes, average_return = self._process_samples(itr, episodes)
logger.log('Optimizing policy...')
self._inner_algo.optimize_policy(episodes)
return average_return
|
Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
Returns:
numpy.float64: Average return.
|
train_once
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/rl2.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/rl2.py
|
MIT
|
def _process_samples(self, itr, episodes):
# pylint: disable=too-many-statements
"""Return processed sample data based on the collected paths.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Original collected episode batch for each
task. For each episode, episode.agent_infos['batch_idx']
indicates which task this episode belongs to. In RL^2, there
are n environments/tasks and paths in each of them will be
concatenated at some point and fed to the policy.
Returns:
EpisodeBatch: Processed batch of episodes for feeding the inner
algorithm.
numpy.float64: The average return.
Raises:
ValueError: If 'batch_idx' is not found.
"""
concatenated_paths = []
paths_by_task = collections.defaultdict(list)
for episode in episodes.split():
if hasattr(episode, 'batch_idx'):
paths_by_task[episode.batch_idx[0]].append(episode)
elif 'batch_idx' in episode.agent_infos:
paths_by_task[episode.agent_infos['batch_idx'][0]].append(
episode)
else:
raise ValueError(
'Batch idx is required for RL2 but not found, '
'Make sure to use garage.tf.algos.rl2.RL2Worker '
'for sampling')
# all path in paths_by_task[i] are sampled from task[i]
for episode_list in paths_by_task.values():
concatenated_path = self._concatenate_episodes(episode_list)
concatenated_paths.append(concatenated_path)
concatenated_episodes = EpisodeBatch.concatenate(*concatenated_paths)
name_map = None
if hasattr(self._task_sampler, '_envs') and hasattr(
self._task_sampler._envs[0]._env, 'all_task_names'):
names = [
env._env.all_task_names[0] for env in self._task_sampler._envs
]
name_map = dict(enumerate(names))
undiscounted_returns = log_multitask_performance(
itr, episodes, self._inner_algo._discount, name_map=name_map)
average_return = np.mean(undiscounted_returns)
return concatenated_episodes, average_return
|
Return processed sample data based on the collected paths.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Original collected episode batch for each
task. For each episode, episode.agent_infos['batch_idx']
indicates which task this episode belongs to. In RL^2, there
are n environments/tasks and paths in each of them will be
concatenated at some point and fed to the policy.
Returns:
EpisodeBatch: Processed batch of episodes for feeding the inner
algorithm.
numpy.float64: The average return.
Raises:
ValueError: If 'batch_idx' is not found.
|
_process_samples
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/rl2.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/rl2.py
|
MIT
|
def _concatenate_episodes(self, episode_list):
"""Concatenate episodes.
The input list contains samples from different episodes of the same
task/environment. In RL^2, paths within each meta batch are all
concatenated into a single path and fed to the policy.
Args:
episode_list (list[EpisodeBatch]): Input paths. All paths are from
different episodes, but the same task/environment.
Returns:
EpisodeBatch: Concatenated episode from the same task/environment.
Shape of values: :math:`[max_episode_length * episode_per_task,
S^*]`
"""
env_infos = {
k: np.concatenate([b.env_infos[k] for b in episode_list])
for k in episode_list[0].env_infos.keys()
}
agent_infos = {
k: np.concatenate([b.agent_infos[k] for b in episode_list])
for k in episode_list[0].agent_infos.keys()
}
episode_infos = {
k: np.concatenate([b.episode_infos[k] for b in episode_list])
for k in episode_list[0].episode_infos.keys()
}
actions = np.concatenate([
self._env_spec.action_space.flatten_n(ep.actions)
for ep in episode_list
])
return EpisodeBatch(
env_spec=episode_list[0].env_spec,
episode_infos=episode_infos,
observations=np.concatenate(
[ep.observations for ep in episode_list]),
last_observations=episode_list[-1].last_observations,
actions=actions,
rewards=np.concatenate([ep.rewards for ep in episode_list]),
env_infos=env_infos,
agent_infos=agent_infos,
step_types=np.concatenate([ep.step_types for ep in episode_list]),
lengths=np.asarray([sum([ep.lengths[0] for ep in episode_list])]))
|
Concatenate episodes.
The input list contains samples from different episodes of the same
task/environment. In RL^2, paths within each meta batch are all
concatenated into a single path and fed to the policy.
Args:
episode_list (list[EpisodeBatch]): Input paths. All paths are from
different episodes, but the same task/environment.
Returns:
EpisodeBatch: Concatenated episode from the same task/environment.
Shape of values: :math:`[max_episode_length * episode_per_task,
S^*]`
|
_concatenate_episodes
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/rl2.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/rl2.py
|
MIT
|
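The concatenation above turns the N episodes sampled from one task into a single meta-episode whose length is the sum of the per-episode lengths; everything except last_observations is joined along the time axis. A shape-level NumPy sketch (the arrays are placeholders):
import numpy as np

# Two episodes from the same task: lengths 4 and 3, obs_dim 2.
ep1_obs = np.zeros((4, 2))
ep2_obs = np.ones((3, 2))
lengths = [4, 3]

meta_obs = np.concatenate([ep1_obs, ep2_obs])
meta_lengths = np.asarray([sum(lengths)])

assert meta_obs.shape == (7, 2)
assert meta_lengths.tolist() == [7]
|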
def _init_opt(self):
"""Build the loss function and init the optimizer."""
with tf.name_scope(self._name):
# Create target policy (actor) and qf (critic) networks
with tf.name_scope('inputs'):
obs_dim = self._env_spec.observation_space.flat_dim
y = tf.compat.v1.placeholder(tf.float32,
shape=(None, 1),
name='input_y')
obs = tf.compat.v1.placeholder(tf.float32,
shape=(None, obs_dim),
name='input_observation')
actions = tf.compat.v1.placeholder(
tf.float32,
shape=(None, self._env_spec.action_space.flat_dim),
name='input_action')
policy_network_outputs = self._target_policy.build(obs,
name='policy')
target_qf_outputs = self._target_qf.build(obs, actions, name='qf')
target_qf2_outputs = self._target_qf2.build(obs,
actions,
name='qf')
self._target_policy_f_prob_online = compile_function(
inputs=[obs], outputs=policy_network_outputs)
self._target_qf_f_prob_online = compile_function(
inputs=[obs, actions], outputs=target_qf_outputs)
self._target_qf2_f_prob_online = compile_function(
inputs=[obs, actions], outputs=target_qf2_outputs)
# Set up target init and update functions
with tf.name_scope('setup_target'):
policy_init_op, policy_update_op = get_target_ops(
self.policy.get_global_vars(),
self._target_policy.get_global_vars(), self._tau)
qf_init_ops, qf_update_ops = get_target_ops(
self.qf.get_global_vars(),
self._target_qf.get_global_vars(), self._tau)
qf2_init_ops, qf2_update_ops = get_target_ops(
self.qf2.get_global_vars(),
self._target_qf2.get_global_vars(), self._tau)
target_init_op = policy_init_op + qf_init_ops + qf2_init_ops
target_update_op = (policy_update_op + qf_update_ops +
qf2_update_ops)
f_init_target = compile_function(inputs=[], outputs=target_init_op)
f_update_target = compile_function(inputs=[],
outputs=target_update_op)
# Set up policy training function
next_action = self.policy.build(obs, name='policy_action')
next_qval = self.qf.build(obs,
next_action,
name='policy_action_qval')
with tf.name_scope('action_loss'):
action_loss = -tf.reduce_mean(next_qval)
with tf.name_scope('minimize_action_loss'):
policy_optimizer = make_optimizer(
self._policy_optimizer,
learning_rate=self._policy_lr,
name='PolicyOptimizer')
policy_train_op = policy_optimizer.minimize(
action_loss, var_list=self.policy.get_trainable_vars())
f_train_policy = compile_function(
inputs=[obs], outputs=[policy_train_op, action_loss])
# Set up qf training function
qval = self.qf.build(obs, actions, name='q_value')
q2val = self.qf2.build(obs, actions, name='q2_value')
with tf.name_scope('qval1_loss'):
qval1_loss = tf.reduce_mean(tf.math.squared_difference(
y, qval))
with tf.name_scope('qval2_loss'):
qval2_loss = tf.reduce_mean(
tf.math.squared_difference(y, q2val))
with tf.name_scope('minimize_qf_loss'):
qf_optimizer = make_optimizer(self._qf_optimizer,
learning_rate=self._qf_lr,
name='QFunctionOptimizer')
qf_train_op = qf_optimizer.minimize(
qval1_loss, var_list=self.qf.get_trainable_vars())
qf2_train_op = qf_optimizer.minimize(
qval2_loss, var_list=self.qf2.get_trainable_vars())
f_train_qf = compile_function(
inputs=[y, obs, actions],
outputs=[qf_train_op, qval1_loss, qval])
f_train_qf2 = compile_function(
inputs=[y, obs, actions],
outputs=[qf2_train_op, qval2_loss, q2val])
self._f_train_policy = f_train_policy
self._f_train_qf = f_train_qf
self._f_init_target = f_init_target
self._f_update_target = f_update_target
self._f_train_qf2 = f_train_qf2
|
Build the loss function and init the optimizer.
|
_init_opt
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/td3.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/td3.py
|
MIT
|
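get_target_ops above produces the hard-copy init and the soft update for the target networks. A NumPy sketch of what a soft (Polyak) update with rate tau does to a single parameter vector, under the common convention target <- tau * online + (1 - tau) * target (the sketch shows only the update rule, not garage's op construction):
import numpy as np

def soft_update(online, target, tau):
    # Move the target a small step toward the online parameters.
    return tau * online + (1.0 - tau) * target

online = np.array([1.0, 2.0, 3.0])
target = np.zeros(3)
for _ in range(5):
    target = soft_update(online, target, tau=0.01)

assert np.all(target < online)   # the target lags behind the online network
assert np.all(target > 0.0)      # but has moved toward it
|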
def train(self, trainer):
"""Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer, which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
if not self._eval_env:
self._eval_env = trainer.get_env_copy()
last_returns = [float('nan')]
trainer.enable_logging = False
for _ in trainer.step_epochs():
for cycle in range(self._steps_per_epoch):
trainer.step_episode = trainer.obtain_episodes(
trainer.step_itr)
if hasattr(self.exploration_policy, 'update'):
self.exploration_policy.update(trainer.step_episode)
self._train_once(trainer.step_itr, trainer.step_episode)
if (cycle == 0 and self._replay_buffer.n_transitions_stored >=
self._min_buffer_size):
trainer.enable_logging = True
eval_episodes = obtain_evaluation_episodes(
self.policy, self._eval_env)
last_returns = log_performance(trainer.step_itr,
eval_episodes,
discount=self._discount)
trainer.step_itr += 1
return np.mean(last_returns)
|
Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer, which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
|
train
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/td3.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/td3.py
|
MIT
|
def _train_once(self, itr, episodes):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
"""
self._replay_buffer.add_episode_batch(episodes)
epoch = itr / self._steps_per_epoch
for _ in range(self._n_train_steps):
if (self._replay_buffer.n_transitions_stored >=
self._min_buffer_size):
qf_loss, y_s, qval, policy_loss = self._optimize_policy(itr)
self._episode_policy_losses.append(policy_loss)
self._episode_qf_losses.append(qf_loss)
self._epoch_ys.append(y_s)
self._epoch_qs.append(qval)
if itr % self._steps_per_epoch == 0:
logger.log('Training finished')
if (self._replay_buffer.n_transitions_stored >=
self._min_buffer_size):
tabular.record('Epoch', epoch)
tabular.record('Policy/AveragePolicyLoss',
np.mean(self._episode_policy_losses))
tabular.record('QFunction/AverageQFunctionLoss',
np.mean(self._episode_qf_losses))
tabular.record('QFunction/AverageQ', np.mean(self._epoch_qs))
tabular.record('QFunction/MaxQ', np.max(self._epoch_qs))
tabular.record('QFunction/AverageAbsQ',
np.mean(np.abs(self._epoch_qs)))
tabular.record('QFunction/AverageY', np.mean(self._epoch_ys))
tabular.record('QFunction/MaxY', np.max(self._epoch_ys))
tabular.record('QFunction/AverageAbsY',
np.mean(np.abs(self._epoch_ys)))
|
Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
|
_train_once
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/td3.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/td3.py
|
MIT
|
def _optimize_policy(self, itr):
"""Perform algorithm optimizing.
Args:
itr(int): Iterations.
Returns:
float: Loss of action predicted by the policy network.
float: Loss of q value predicted by the q network.
float: y_s.
float: Q value predicted by the q network.
"""
timesteps = self._replay_buffer.sample_timesteps(
self._buffer_batch_size)
observations = timesteps.observations
rewards = timesteps.rewards.reshape(-1, 1)
actions = timesteps.actions
next_observations = timesteps.next_observations
terminals = timesteps.terminals.reshape(-1, 1)
rewards *= self._reward_scale
next_inputs = next_observations
inputs = observations
target_actions = self._target_policy_f_prob_online(next_inputs)
noise = np.random.normal(0.0, self._exploration_policy_sigma,
target_actions.shape)
noise = np.clip(noise, -self._exploration_policy_clip,
self._exploration_policy_clip)
target_actions += noise
target_qvals = self._target_qf_f_prob_online(next_inputs,
target_actions)
target_q2vals = self._target_qf2_f_prob_online(next_inputs,
target_actions)
target_qvals = np.minimum(target_qvals, target_q2vals)
ys = (rewards + (1.0 - terminals) * self._discount * target_qvals)
_, qval_loss, qval = self._f_train_qf(ys, inputs, actions)
_, q2val_loss, q2val = self._f_train_qf2(ys, inputs, actions)
if qval_loss > q2val_loss:
qval_loss = q2val_loss
qval = q2val
# update policy and target networks less frequently
if self._action_loss is None or (itr % self._actor_update_period) == 0:
_, self._action_loss = self._f_train_policy(inputs)
self._f_update_target()
return qval_loss, ys, qval, self._action_loss
|
Perform one optimization step of the algorithm.
Args:
itr (int): Iteration number.
Returns:
float: Loss of the q value predicted by the q network.
float: y_s, the target values for the critics.
float: Q value predicted by the q network.
float: Loss of the action predicted by the policy network.
|
_optimize_policy
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/td3.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/td3.py
|
MIT
|
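The target computation above follows the TD3 recipe: smooth the target action with clipped Gaussian noise, take the minimum of the two target critics, and bootstrap only on non-terminal steps. A NumPy sketch of that target (all inputs are made-up placeholders):
import numpy as np

rewards = np.array([[1.0], [0.0]])
terminals = np.array([[0.0], [1.0]])
target_q1 = np.array([[10.0], [8.0]])
target_q2 = np.array([[9.5], [8.5]])
discount = 0.99

target_actions = np.array([[0.3], [-0.2]])
noise = np.clip(np.random.normal(0.0, 0.2, target_actions.shape), -0.5, 0.5)
smoothed_actions = target_actions + noise    # what the target critics would receive

target_q = np.minimum(target_q1, target_q2)  # clipped double-Q
ys = rewards + (1.0 - terminals) * discount * target_q
assert np.allclose(ys, [[1.0 + 0.99 * 9.5], [0.0]])
|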
def step_episode(self):
"""Take a single time-step in the current episode.
Returns:
bool: True iff the episode is done, either due to the environment
indicating termination or due to reaching `max_episode_length`.
"""
if self._eps_length < self._max_episode_length:
a, agent_info = self.agent.get_action_given_latent(
self._prev_obs, self._z)
es = self.env.step(a)
self._observations.append(self._prev_obs)
self._env_steps.append(es)
self._tasks.append(self._t)
self._latents.append(self._z)
for k, v in self._latent_info.items():
self._latent_infos[k].append(v)
for k, v in agent_info.items():
self._agent_infos[k].append(v)
self._eps_length += 1
if not es.last:
self._prev_obs = es.observation
return False
self._lengths.append(self._eps_length)
self._last_observations.append(self._prev_obs)
return True
|
Take a single time-step in the current episode.
Returns:
bool: True iff the episode is done, either due to the environment
indicating termination or due to reaching `max_episode_length`.
|
step_episode
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te.py
|
MIT
|
def collect_episode(self):
"""Collect the current episode, clearing the internal buffer.
One-hot task id is saved in env_infos['task_onehot']. Latent is saved
in agent_infos['latent']. Latent infos are saved in
agent_infos['latent_info_name'], where info_name is the original latent
info name.
Returns:
EpisodeBatch: A batch of the episodes completed since the last call
to collect_episode().
"""
observations = self._observations
self._observations = []
last_observations = self._last_observations
self._last_observations = []
actions = []
rewards = []
env_infos = defaultdict(list)
step_types = []
for es in self._env_steps:
actions.append(es.action)
rewards.append(es.reward)
step_types.append(es.step_type)
for k, v in es.env_info.items():
env_infos[k].append(v)
self._env_steps = []
latents = self._latents
self._latents = []
tasks = self._tasks
self._tasks = []
agent_infos = self._agent_infos
self._agent_infos = defaultdict(list)
latent_infos = self._latent_infos
self._latent_infos = defaultdict(list)
episode_infos = self._episode_infos
self._episode_infos = defaultdict(list)
for k, v in latent_infos.items():
latent_infos[k] = np.asarray(v)
for k, v in agent_infos.items():
agent_infos[k] = np.asarray(v)
for k, v in env_infos.items():
env_infos[k] = np.asarray(v)
for k, v in episode_infos.items():
episode_infos[k] = np.asarray(v)
env_infos['task_onehot'] = np.asarray(tasks)
agent_infos['latent'] = np.asarray(latents)
for k, v in latent_infos.items():
agent_infos['latent_{}'.format(k)] = v
lengths = self._lengths
self._lengths = []
return EpisodeBatch(env_spec=self.env.spec,
episode_infos=episode_infos,
observations=np.asarray(observations),
last_observations=np.asarray(last_observations),
actions=np.asarray(actions),
rewards=np.asarray(rewards),
step_types=np.asarray(step_types, dtype=StepType),
env_infos=dict(env_infos),
agent_infos=dict(agent_infos),
lengths=np.asarray(lengths, dtype='i'))
|
Collect the current episode, clearing the internal buffer.
One-hot task id is saved in env_infos['task_onehot']. Latent is saved
in agent_infos['latent']. Latent infos are saved in
agent_infos['latent_info_name'], where info_name is the original latent
info name.
Returns:
EpisodeBatch: A batch of the episodes completed since the last call
to collect_episode().
|
collect_episode
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te.py
|
MIT
|
def _init_opt(self):
"""Initialize optimizater.
Raises:
NotImplementedError: Raise if the policy is recurrent.
"""
# Input variables
(pol_loss_inputs, pol_opt_inputs, infer_loss_inputs,
infer_opt_inputs) = self._build_inputs()
self._policy_opt_inputs = pol_opt_inputs
self._inference_opt_inputs = infer_opt_inputs
# Jointly optimize policy and encoder network
pol_loss, pol_kl, _ = self._build_policy_loss(pol_loss_inputs)
self._optimizer.update_opt(loss=pol_loss,
target=self.policy,
leq_constraint=(pol_kl, self._max_kl_step),
inputs=flatten_inputs(
self._policy_opt_inputs),
constraint_name='mean_kl')
# Optimize inference distribution separately (supervised learning)
infer_loss, _ = self._build_inference_loss(infer_loss_inputs)
self.inference_optimizer.update_opt(loss=infer_loss,
target=self._inference,
inputs=flatten_inputs(
self._inference_opt_inputs))
|
Initialize the optimizer.
Raises:
NotImplementedError: Raise if the policy is recurrent.
|
_init_opt
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
def train(self, trainer):
"""Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Trainer is passed to give algorithm
the access to trainer.step_epochs(), which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
last_return = None
for _ in trainer.step_epochs():
trainer.step_episode = trainer.obtain_episodes(trainer.step_itr)
last_return = self._train_once(trainer.step_itr,
trainer.step_episode)
trainer.step_itr += 1
return last_return
|
Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Trainer is passed to give algorithm
the access to trainer.step_epochs(), which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
|
train
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
def _train_once(self, itr, episodes):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
Returns:
numpy.float64: Average return.
"""
undiscounted_returns = log_performance(itr,
episodes,
discount=self._discount)
# Calculate baseline predictions
baselines = []
start = 0
for length in episodes.lengths:
stop = start + length
baseline = self._baseline.predict(
dict(observations=episodes.observations[start:stop],
tasks=episodes.env_infos['task_onehot'][start:stop],
latents=episodes.agent_infos['latent'][start:stop]))
baselines.append(baseline)
start = stop
baselines = pad_batch_array(np.concatenate(baselines),
episodes.lengths, self.max_episode_length)
# Process trajectories
embed_eps, embed_ep_infos = self._process_episodes(episodes)
average_return = np.mean(undiscounted_returns)
logger.log('Optimizing policy...')
self._optimize_policy(itr, episodes, baselines, embed_eps,
embed_ep_infos)
return average_return
|
Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
Returns:
numpy.float64: Average return.
|
_train_once
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
def _optimize_policy(self, itr, episodes, baselines, embed_eps,
embed_ep_infos):
"""Optimize policy.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
baselines (np.ndarray): Baseline predictions.
embed_eps (np.ndarray): Embedding episodes.
embed_ep_infos (dict): Embedding distribution information.
"""
del itr
policy_opt_input_values = self._policy_opt_input_values(
episodes, baselines, embed_eps)
inference_opt_input_values = self._inference_opt_input_values(
episodes, embed_eps, embed_ep_infos)
self._train_policy_and_encoder_networks(policy_opt_input_values)
self._train_inference_network(inference_opt_input_values)
# paths = samples_data['paths']
fit_paths = self._evaluate(policy_opt_input_values, episodes,
baselines, embed_ep_infos)
self._visualize_distribution()
logger.log('Fitting baseline...')
self._baseline.fit(fit_paths)
self._old_policy.parameters = self.policy.parameters
self._old_policy.encoder.model.parameters = (
self.policy.encoder.model.parameters)
self._old_inference.model.parameters = self._inference.model.parameters
|
Optimize policy.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
baselines (np.ndarray): Baseline predictions.
embed_eps (np.ndarray): Embedding episodes.
embed_ep_infos (dict): Embedding distribution information.
|
_optimize_policy
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
def _process_episodes(self, episodes):
# pylint: disable=too-many-statements
"""Return processed sample data based on the collected paths.
Args:
episodes (EpisodeBatch): Batch of episodes.
Returns:
np.ndarray: Embedding episodes.
dict: Embedding distribution information.
* mean (list[numpy.ndarray]): Means of the distribution.
* log_std (list[numpy.ndarray]): Log standard deviations of the
distribution.
"""
max_episode_length = self.max_episode_length
trajectories = []
trajectory_infos = []
for obs in episodes.padded_observations:
# - Calculate a forward-looking sliding window.
# - If step_space has shape (n, d), then trajs will have shape
# (n, window, d)
# - The length of the sliding window is determined by the
# trajectory inference spec. We smear the last few elements to
# preserve the time dimension.
# - Only observation is used for a single step.
# Alternatively, stacked [observation, action] can be used
# in harder tasks.
obs_flat = self._env_spec.observation_space.flatten_n(obs)
steps = obs_flat
window = self._inference.spec.input_space.shape[0]
traj = sliding_window(steps, window, smear=True)
traj_flat = self._inference.spec.input_space.flatten_n(traj)
trajectories.append(traj_flat)
_, traj_info = self._inference.get_latents(traj_flat)
trajectory_infos.append(traj_info)
trajectories = np.stack(trajectories)
trajectory_infos = stack_tensor_dict_list(
[pad_tensor_dict(p, max_episode_length) for p in trajectory_infos])
return trajectories, trajectory_infos
|
Return processed sample data based on the collected paths.
Args:
episodes (EpisodeBatch): Batch of episodes.
Returns:
np.ndarray: Embedding episodes.
dict: Embedding distribution information.
* mean (list[numpy.ndarray]): Means of the distribution.
* log_std (list[numpy.ndarray]): Log standard deviations of the
distribution.
|
_process_episodes
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
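The trajectory windows above are forward-looking: for step t the window is steps[t:t+window], and near the end of the episode the last observation is repeated ("smeared") so every step still yields a full window. A self-contained NumPy sketch of that idea (sliding_window itself is garage's helper; this reimplementation is only for illustration):
import numpy as np

def sliding_window_sketch(steps, window):
    """Forward-looking windows of shape (n, window, d), smearing the last row."""
    n = len(steps)
    padded = np.concatenate([steps, np.repeat(steps[-1:], window - 1, axis=0)])
    return np.stack([padded[t:t + window] for t in range(n)])

steps = np.arange(8).reshape(4, 2)          # 4 steps, 2-dim observations
windows = sliding_window_sketch(steps, window=3)
assert windows.shape == (4, 3, 2)
assert np.array_equal(windows[-1], np.stack([steps[-1]] * 3))   # smeared tail
|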
def _build_inputs(self):
"""Build input variables.
Returns:
namedtuple: Collection of variables to compute policy loss.
namedtuple: Collection of variables to do policy optimization.
namedtuple: Collection of variables to compute inference loss.
namedtuple: Collection of variables to do inference optimization.
"""
# pylint: disable=too-many-statements
observation_space = self.policy.observation_space
action_space = self.policy.action_space
task_space = self.policy.task_space
latent_space = self.policy.latent_space
trajectory_space = self._inference.spec.input_space
with tf.name_scope('inputs'):
obs_var = observation_space.to_tf_placeholder(name='obs',
batch_dims=2)
task_var = tf.compat.v1.placeholder(
tf.float32,
shape=[None, None, task_space.flat_dim],
name='task')
trajectory_var = tf.compat.v1.placeholder(
tf.float32, shape=[None, None, trajectory_space.flat_dim])
latent_var = tf.compat.v1.placeholder(
tf.float32, shape=[None, None, latent_space.flat_dim])
action_var = action_space.to_tf_placeholder(name='action',
batch_dims=2)
reward_var = tf.compat.v1.placeholder(tf.float32,
shape=[None, None],
name='reward')
baseline_var = tf.compat.v1.placeholder(tf.float32,
shape=[None, None],
name='baseline')
valid_var = tf.compat.v1.placeholder(tf.float32,
shape=[None, None],
name='valid')
# Policy state (for RNNs)
policy_state_info_vars = {
k: tf.compat.v1.placeholder(tf.float32,
shape=[None] * 2 + list(shape),
name=k)
for k, shape in self.policy.state_info_specs
}
policy_state_info_vars_list = [
policy_state_info_vars[k] for k in self.policy.state_info_keys
]
# Encoder state (for RNNs)
embed_state_info_vars = {
k: tf.compat.v1.placeholder(tf.float32,
shape=[None] * 2 + list(shape),
name='embed_%s' % k)
for k, shape in self.policy.encoder.state_info_specs
}
embed_state_info_vars_list = [
embed_state_info_vars[k]
for k in self.policy.encoder.state_info_keys
]
# Inference distribution state (for RNNs)
infer_state_info_vars = {
k: tf.compat.v1.placeholder(tf.float32,
shape=[None] * 2 + list(shape),
name='infer_%s' % k)
for k, shape in self._inference.state_info_specs
}
infer_state_info_vars_list = [
infer_state_info_vars[k]
for k in self._inference.state_info_keys
]
extra_obs_var = [
tf.cast(v, tf.float32) for v in policy_state_info_vars_list
]
# Pylint false alarm
# pylint: disable=unexpected-keyword-arg, no-value-for-parameter
augmented_obs_var = tf.concat([obs_var] + extra_obs_var, axis=-1)
extra_traj_var = [
tf.cast(v, tf.float32) for v in infer_state_info_vars_list
]
augmented_traj_var = tf.concat([trajectory_var] + extra_traj_var, -1)
# Policy and encoder network loss and optimizer inputs
policy_loss_inputs = graph_inputs(
'PolicyLossInputs',
augmented_obs_var=augmented_obs_var,
augmented_traj_var=augmented_traj_var,
task_var=task_var,
latent_var=latent_var,
action_var=action_var,
reward_var=reward_var,
baseline_var=baseline_var,
valid_var=valid_var)
policy_opt_inputs = graph_inputs(
'PolicyOptInputs',
obs_var=obs_var,
action_var=action_var,
reward_var=reward_var,
baseline_var=baseline_var,
trajectory_var=trajectory_var,
task_var=task_var,
latent_var=latent_var,
valid_var=valid_var,
policy_state_info_vars_list=policy_state_info_vars_list,
embed_state_info_vars_list=embed_state_info_vars_list,
)
# Inference network loss and optimizer inputs
inference_loss_inputs = graph_inputs('InferenceLossInputs',
latent_var=latent_var,
valid_var=valid_var)
inference_opt_inputs = graph_inputs(
'InferenceOptInputs',
latent_var=latent_var,
trajectory_var=trajectory_var,
valid_var=valid_var,
infer_state_info_vars_list=infer_state_info_vars_list,
)
return (policy_loss_inputs, policy_opt_inputs, inference_loss_inputs,
inference_opt_inputs)
|
Build input variables.
Returns:
namedtuple: Collection of variables to compute policy loss.
namedtuple: Collection of variables to do policy optimization.
namedtuple: Collection of variables to compute inference loss.
namedtuple: Collection of variables to do inference optimization.
|
_build_inputs
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
def _build_policy_loss(self, i):
"""Build policy loss and other output tensors.
Args:
i (namedtuple): Collection of variables to compute policy loss.
Returns:
tf.Tensor: Policy loss.
tf.Tensor: Mean policy KL divergence.
tf.Tensor: Mean encoder KL divergence.
"""
# pylint: disable=too-many-statements
self._policy_network, self._encoder_network = (self.policy.build(
i.augmented_obs_var, i.task_var, name='loss_policy'))
self._old_policy_network, self._old_encoder_network = (
self._old_policy.build(i.augmented_obs_var,
i.task_var,
name='loss_old_policy'))
self._infer_network = self._inference.build(i.augmented_traj_var,
name='loss_infer')
self._old_infer_network = self._old_inference.build(
i.augmented_traj_var, name='loss_old_infer')
pol_dist = self._policy_network.dist
old_pol_dist = self._old_policy_network.dist
# Entropy terms
encoder_entropy, inference_ce, policy_entropy = (
self._build_entropy_terms(i))
# Augment the path rewards with entropy terms
with tf.name_scope('augmented_rewards'):
rewards = (i.reward_var -
(self.inference_ce_coeff * inference_ce) +
(self._policy_ent_coeff * policy_entropy))
with tf.name_scope('policy_loss'):
with tf.name_scope('advantages'):
adv = compute_advantages(self._discount,
self._gae_lambda,
self.max_episode_length,
i.baseline_var,
rewards,
name='advantages')
adv = tf.reshape(adv, [-1, self.max_episode_length])
# Optionally normalize advantages
eps = tf.constant(1e-8, dtype=tf.float32)
if self._center_adv:
adv = center_advs(adv, axes=[0], eps=eps)
if self._positive_adv:
adv = positive_advs(adv, eps)
# Calculate loss function and KL divergence
with tf.name_scope('kl'):
kl = old_pol_dist.kl_divergence(pol_dist)
pol_mean_kl = tf.reduce_mean(kl)
ll = pol_dist.log_prob(i.action_var, name='log_likelihood')
# Calculate surrogate loss
with tf.name_scope('surr_loss'):
old_ll = old_pol_dist.log_prob(i.action_var)
old_ll = tf.stop_gradient(old_ll)
# Clip early to avoid overflow
lr = tf.exp(
tf.minimum(ll - old_ll, np.log(1 + self._lr_clip_range)))
surrogate = lr * adv
surrogate = tf.debugging.check_numerics(surrogate,
message='surrogate')
# Finalize objective function
with tf.name_scope('loss'):
lr_clip = tf.clip_by_value(lr,
1 - self._lr_clip_range,
1 + self._lr_clip_range,
name='lr_clip')
surr_clip = lr_clip * adv
obj = tf.minimum(surrogate, surr_clip, name='surr_obj')
obj = tf.boolean_mask(obj, i.valid_var)
# Maximize E[surrogate objective] by minimizing
# -E_t[surrogate objective]
loss = -tf.reduce_mean(obj)
# Encoder entropy bonus
loss -= self.encoder_ent_coeff * encoder_entropy
encoder_mean_kl = self._build_encoder_kl()
# Diagnostic functions
self._f_policy_kl = tf.compat.v1.get_default_session(
).make_callable(pol_mean_kl,
feed_list=flatten_inputs(self._policy_opt_inputs))
self._f_rewards = tf.compat.v1.get_default_session().make_callable(
rewards, feed_list=flatten_inputs(self._policy_opt_inputs))
returns = discounted_returns(self._discount,
self.max_episode_length,
rewards,
name='returns')
self._f_returns = tf.compat.v1.get_default_session().make_callable(
returns, feed_list=flatten_inputs(self._policy_opt_inputs))
return loss, pol_mean_kl, encoder_mean_kl
|
Build policy loss and other output tensors.
Args:
i (namedtuple): Collection of variables to compute policy loss.
Returns:
tf.Tensor: Policy loss.
tf.Tensor: Mean policy KL divergence.
tf.Tensor: Mean encoder KL divergence.
|
_build_policy_loss
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
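The surrogate above is the PPO-style clipped objective applied per time step: the likelihood ratio is clipped to [1 - epsilon, 1 + epsilon] and the per-step objective is the minimum of the clipped and unclipped terms. A NumPy sketch on a handful of steps (the numbers are arbitrary):
import numpy as np

ll = np.array([-0.4, -1.0, -0.2])        # new log-likelihoods
old_ll = np.array([-0.5, -0.8, -0.6])    # old (stop-gradient) log-likelihoods
adv = np.array([1.0, -2.0, 0.5])
eps = 0.2

lr = np.exp(np.minimum(ll - old_ll, np.log(1 + eps)))   # clip early to avoid overflow
surrogate = lr * adv
surr_clip = np.clip(lr, 1 - eps, 1 + eps) * adv
obj = np.minimum(surrogate, surr_clip)
loss = -np.mean(obj)                      # minimized by the optimizer
print(loss)
|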
def _build_entropy_terms(self, i):
"""Build policy entropy tensor.
Args:
i (namedtuple): Collection of variables to compute policy loss.
Returns:
tf.Tensor: Policy entropy.
"""
pol_dist = self._policy_network.dist
infer_dist = self._infer_network.dist
enc_dist = self._encoder_network.dist
with tf.name_scope('entropy_terms'):
# 1. Encoder distribution total entropy
with tf.name_scope('encoder_entropy'):
encoder_dist, _, _ = self.policy.encoder.build(
i.task_var, name='encoder_entropy').outputs
encoder_all_task_entropies = -encoder_dist.log_prob(
i.latent_var)
encoder_entropy = encoder_all_task_entropies
if self._use_softplus_entropy:
encoder_entropy = tf.nn.softplus(
encoder_all_task_entropies)
encoder_entropy = tf.reduce_mean(encoder_entropy,
name='encoder_entropy')
encoder_entropy = tf.stop_gradient(encoder_entropy)
# 2. Inference distribution cross-entropy (log-likelihood)
with tf.name_scope('inference_ce'):
# Build inference with trajectory windows
traj_ll = infer_dist.log_prob(
enc_dist.sample(seed=deterministic.get_tf_seed_stream()),
name='traj_ll')
inference_ce_raw = -traj_ll
inference_ce = tf.clip_by_value(inference_ce_raw, -3, 3)
if self._use_softplus_entropy:
inference_ce = tf.nn.softplus(inference_ce)
if self._stop_ce_gradient:
inference_ce = tf.stop_gradient(inference_ce)
# 3. Policy path entropies
with tf.name_scope('policy_entropy'):
policy_entropy = -pol_dist.log_prob(i.action_var,
name='policy_log_likeli')
# This prevents entropy from becoming negative
# for small policy std
if self._use_softplus_entropy:
policy_entropy = tf.nn.softplus(policy_entropy)
policy_entropy = tf.stop_gradient(policy_entropy)
# Diagnostic functions
self._f_task_entropies = compile_function(
flatten_inputs(self._policy_opt_inputs),
encoder_all_task_entropies)
self._f_encoder_entropy = compile_function(
flatten_inputs(self._policy_opt_inputs), encoder_entropy)
self._f_inference_ce = compile_function(
flatten_inputs(self._policy_opt_inputs),
tf.reduce_mean(inference_ce * i.valid_var))
self._f_policy_entropy = compile_function(
flatten_inputs(self._policy_opt_inputs), policy_entropy)
return encoder_entropy, inference_ce, policy_entropy
|
Build entropy terms.
Args:
i (namedtuple): Collection of variables to compute policy loss.
Returns:
tf.Tensor: Encoder entropy.
tf.Tensor: Inference cross-entropy.
tf.Tensor: Policy entropy.
|
_build_entropy_terms
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
def _build_encoder_kl(self):
"""Build graph for encoder KL divergence.
Returns:
tf.Tensor: Encoder KL divergence.
"""
dist = self._encoder_network.dist
old_dist = self._old_encoder_network.dist
with tf.name_scope('encoder_kl'):
kl = old_dist.kl_divergence(dist)
mean_kl = tf.reduce_mean(kl)
# Diagnostic function
self._f_encoder_kl = compile_function(
flatten_inputs(self._policy_opt_inputs), mean_kl)
return mean_kl
|
Build graph for encoder KL divergence.
Returns:
tf.Tensor: Encoder KL divergence.
|
_build_encoder_kl
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
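For the diagonal-Gaussian encoders used by `_build_encoder_kl`, `old_dist.kl_divergence(dist)` evaluates the closed-form KL between two factorized Gaussians. A NumPy sketch of that quantity (a hypothetical helper, not part of garage):

import numpy as np

def diag_gaussian_kl(mu_p, std_p, mu_q, std_q):
    # KL(p || q) for diagonal Gaussians, summed over latent dimensions
    var_p, var_q = std_p ** 2, std_q ** 2
    return np.sum(
        np.log(std_q / std_p) + (var_p + (mu_p - mu_q) ** 2) / (2.0 * var_q) - 0.5,
        axis=-1)

# Mean KL over a batch of encoder distributions, mirroring tf.reduce_mean(kl)
old_mu, old_std = np.zeros((4, 2)), np.ones((4, 2))
new_mu, new_std = 0.1 * np.ones((4, 2)), 0.9 * np.ones((4, 2))
mean_kl = diag_gaussian_kl(old_mu, old_std, new_mu, new_std).mean()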
def _build_inference_loss(self, i):
"""Build loss function for the inference network.
Args:
i (namedtuple): Collection of variables to compute inference loss.
Returns:
tf.Tensor: Inference loss.
tf.Tensor: Inference KL divergence.
"""
dist = self._infer_network.dist
old_dist = self._old_infer_network.dist
with tf.name_scope('infer_loss'):
traj_ll = dist.log_prob(i.latent_var, name='traj_ll_2')
# Calculate loss
traj_gammas = tf.constant(float(self._discount),
dtype=tf.float32,
shape=[self.max_episode_length])
# Pylint false alarm
# pylint: disable=no-value-for-parameter
traj_discounts = tf.compat.v1.cumprod(traj_gammas,
exclusive=True,
name='traj_discounts')
discount_traj_ll = traj_discounts * traj_ll
discount_traj_ll = tf.boolean_mask(discount_traj_ll, i.valid_var)
with tf.name_scope('loss'):
infer_loss = -tf.reduce_mean(discount_traj_ll,
name='infer_loss')
with tf.name_scope('kl'):
# Calculate predicted encoder distributions for each timestep
# Calculate KL divergence
kl = old_dist.kl_divergence(dist)
infer_kl = tf.reduce_mean(kl, name='infer_kl')
return infer_loss, infer_kl
|
Build loss function for the inference network.
Args:
i (namedtuple): Collection of variables to compute inference loss.
Returns:
tf.Tensor: Inference loss.
tf.Tensor: Inference KL divergence.
|
_build_inference_loss
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
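In `_build_inference_loss`, the exclusive cumprod over a constant vector of gammas yields the per-timestep discounts [1, gamma, gamma^2, ...], and the loss is the negated mean of the discounted log-likelihood over valid timesteps. A NumPy sketch with illustrative names:

import numpy as np

def discounted_masked_ll(traj_ll, valids, discount):
    # Mean of discount^t * log-likelihood over valid timesteps only
    max_len = traj_ll.shape[-1]
    traj_discounts = discount ** np.arange(max_len)   # exclusive cumprod of gammas
    discounted = traj_discounts * traj_ll
    return discounted[valids.astype(bool)].mean()

traj_ll = np.random.randn(3, 5)            # (episodes, max_episode_length)
valids = np.array([[1, 1, 1, 0, 0],
                   [1, 1, 1, 1, 1],
                   [1, 1, 0, 0, 0]])
infer_loss = -discounted_masked_ll(traj_ll, valids, discount=0.99)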
def _policy_opt_input_values(self, episodes, baselines, embed_eps):
"""Map episode samples to the policy optimizer inputs.
Args:
episodes (EpisodeBatch): Batch of episodes.
baselines (np.ndarray): Baseline predictions.
embed_eps (np.ndarray): Embedding episodes.
Returns:
list(np.ndarray): Flattened policy optimization input values.
"""
actions = [
self._env_spec.action_space.flatten_n(act)
for act in episodes.actions_list
]
actions = pad_batch_array(np.concatenate(actions), episodes.lengths,
self.max_episode_length)
tasks = pad_batch_array(episodes.env_infos['task_onehot'],
episodes.lengths, self.max_episode_length)
latents = pad_batch_array(episodes.agent_infos['latent'],
episodes.lengths, self.max_episode_length)
agent_infos = episodes.padded_agent_infos
policy_state_info_list = [
agent_infos[k] for k in self.policy.state_info_keys
]
embed_state_info_list = [
agent_infos['latent_' + k]
for k in self.policy.encoder.state_info_keys
]
# pylint: disable=unexpected-keyword-arg
policy_opt_input_values = self._policy_opt_inputs._replace(
obs_var=episodes.padded_observations,
action_var=actions,
reward_var=episodes.padded_rewards,
baseline_var=baselines,
trajectory_var=embed_eps,
task_var=tasks,
latent_var=latents,
valid_var=episodes.valids,
policy_state_info_vars_list=policy_state_info_list,
embed_state_info_vars_list=embed_state_info_list,
)
return flatten_inputs(policy_opt_input_values)
|
Map episode samples to the policy optimizer inputs.
Args:
episodes (EpisodeBatch): Batch of episodes.
baselines (np.ndarray): Baseline predictions.
embed_eps (np.ndarray): Embedding episodes.
Returns:
list(np.ndarray): Flattened policy optimization input values.
|
_policy_opt_input_values
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
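`pad_batch_array`, used throughout `_policy_opt_input_values`, takes a flat concatenation of per-episode arrays plus the episode lengths and returns a zero-padded `(n_episodes, max_episode_length, ...)` array. A simplified NumPy stand-in, assuming the same calling convention:

import numpy as np

def pad_batch_array(flat, lengths, max_len):
    # Split a concatenated array by episode lengths and zero-pad each episode
    padded = np.zeros((len(lengths), max_len) + flat.shape[1:], dtype=flat.dtype)
    start = 0
    for i, length in enumerate(lengths):
        padded[i, :length] = flat[start:start + length]
        start += length
    return padded

actions = np.arange(7, dtype=np.float32).reshape(7, 1)   # two episodes, concatenated
padded = pad_batch_array(actions, lengths=[3, 4], max_len=5)
assert padded.shape == (2, 5, 1)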
def _inference_opt_input_values(self, episodes, embed_eps, embed_ep_infos):
"""Map episode samples to the inference optimizer inputs.
Args:
episodes (EpisodeBatch): Batch of episodes.
embed_eps (np.ndarray): Embedding episodes.
embed_ep_infos (dict): Embedding distribution information.
Returns:
list(np.ndarray): Flattened inference optimization input values.
"""
latents = pad_batch_array(episodes.agent_infos['latent'],
episodes.lengths, self.max_episode_length)
infer_state_info_list = [
embed_ep_infos[k] for k in self._inference.state_info_keys
]
# pylint: disable=unexpected-keyword-arg
inference_opt_input_values = self._inference_opt_inputs._replace(
latent_var=latents,
trajectory_var=embed_eps,
valid_var=episodes.valids,
infer_state_info_vars_list=infer_state_info_list,
)
return flatten_inputs(inference_opt_input_values)
|
Map episode samples to the inference optimizer inputs.
Args:
episodes (EpisodeBatch): Batch of episodes.
embed_eps (np.ndarray): Embedding episodes.
embed_ep_infos (dict): Embedding distribution information.
Returns:
list(np.ndarray): Flattened inference optimization input values.
|
_inference_opt_input_values
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
def _evaluate(self, policy_opt_input_values, episodes, baselines,
embed_ep_infos):
"""Evaluate rewards and everything else.
Args:
policy_opt_input_values (list[np.ndarray]): Flattened
policy optimization input values.
episodes (EpisodeBatch): Batch of episodes.
baselines (np.ndarray): Baseline predictions.
embed_ep_infos (dict): Embedding distribution information.
Returns:
list[dict]: Paths for fitting the baseline.
"""
# pylint: disable=too-many-statements
fit_paths = []
valids = episodes.valids
observations = episodes.padded_observations
tasks = pad_batch_array(episodes.env_infos['task_onehot'],
episodes.lengths, self.max_episode_length)
latents = pad_batch_array(episodes.agent_infos['latent'],
episodes.lengths, self.max_episode_length)
baselines_list = []
for baseline, valid in zip(baselines, valids):
baselines_list.append(baseline[valid.astype(np.bool)])
# Augment reward from baselines
rewards_tensor = self._f_rewards(*policy_opt_input_values)
returns_tensor = self._f_returns(*policy_opt_input_values)
returns_tensor = np.squeeze(returns_tensor, -1)
env_rewards = episodes.rewards
env_returns = [
discount_cumsum(rwd, self._discount)
for rwd in episodes.padded_rewards
]
env_average_discounted_return = np.mean(
[ret[0] for ret in env_returns])
# Recompute returns and prepare paths for fitting the baseline
aug_rewards = []
aug_returns = []
for rew, ret, val, task, latent, obs in zip(rewards_tensor,
returns_tensor, valids,
tasks, latents,
observations):
returns = ret[val.astype(np.bool)]
task = task[val.astype(np.bool)]
latent = latent[val.astype(np.bool)]
obs = obs[val.astype(np.bool)]
aug_rewards.append(rew[val.astype(np.bool)])
aug_returns.append(returns)
fit_paths.append(
dict(observations=obs,
tasks=task,
latents=latent,
returns=returns))
aug_rewards = concat_tensor_list(aug_rewards)
aug_returns = concat_tensor_list(aug_returns)
# Calculate effect of the entropy terms
d_rewards = np.mean(aug_rewards - env_rewards)
tabular.record('{}/EntRewards'.format(self.policy.name), d_rewards)
aug_average_discounted_return = (np.mean(
[ret[0] for ret in returns_tensor]))
d_returns = np.mean(aug_average_discounted_return -
env_average_discounted_return)
tabular.record('{}/EntReturns'.format(self.policy.name), d_returns)
# Calculate explained variance
ev = explained_variance_1d(np.concatenate(baselines_list), aug_returns)
tabular.record('{}/ExplainedVariance'.format(self._baseline.name), ev)
inference_rmse = (embed_ep_infos['mean'] - latents)**2.
inference_rmse = np.sqrt(inference_rmse.mean())
tabular.record('Inference/RMSE', inference_rmse)
inference_rrse = rrse(latents, embed_ep_infos['mean'])
tabular.record('Inference/RRSE', inference_rrse)
embed_ent = self._f_encoder_entropy(*policy_opt_input_values)
tabular.record('{}/Encoder/Entropy'.format(self.policy.name),
embed_ent)
infer_ce = self._f_inference_ce(*policy_opt_input_values)
tabular.record('Inference/CrossEntropy', infer_ce)
pol_ent = self._f_policy_entropy(*policy_opt_input_values)
pol_ent = np.sum(pol_ent) / np.sum(episodes.lengths)
tabular.record('{}/Entropy'.format(self.policy.name), pol_ent)
task_ents = self._f_task_entropies(*policy_opt_input_values)
tasks = tasks[:, 0, :]
_, task_indices = np.nonzero(tasks)
path_lengths = np.sum(valids, axis=1)
for t in range(self.policy.task_space.flat_dim):
lengths = path_lengths[task_indices == t]
completed = lengths < self.max_episode_length
pct_completed = np.mean(completed)
tabular.record('Tasks/EpisodeLength/t={}'.format(t),
np.mean(lengths))
tabular.record('Tasks/TerminationRate/t={}'.format(t),
pct_completed)
tabular.record('Tasks/Entropy/t={}'.format(t), task_ents[t])
return fit_paths
|
Evaluate rewards and everything else.
Args:
policy_opt_input_values (list[np.ndarray]): Flattened
policy optimization input values.
episodes (EpisodeBatch): Batch of episodes.
baselines (np.ndarray): Baseline predictions.
embed_ep_infos (dict): Embedding distribution information.
Returns:
list[dict]: Paths for fitting the baseline.
|
_evaluate
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
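The diagnostics in `_evaluate` lean on discounted cumulative returns and 1-D explained variance. Plain-NumPy sketches of both (illustrative only, not the garage implementations):

import numpy as np

def discount_cumsum(rewards, discount):
    # returns[t] = sum over k >= t of discount^(k - t) * rewards[k]
    returns = np.zeros_like(rewards, dtype=np.float64)
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + discount * running
        returns[t] = running
    return returns

def explained_variance_1d(pred, target):
    # 1 - Var[target - pred] / Var[target]; defined as 0 for a constant target
    target_var = np.var(target)
    if np.isclose(target_var, 0):
        return 0.0
    return 1.0 - np.var(target - pred) / target_var

returns = discount_cumsum(np.array([1.0, 1.0, 1.0]), discount=0.9)   # [2.71, 1.9, 1.0]
ev = explained_variance_1d(returns + 0.1, returns)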
def _train_policy_and_encoder_networks(self, policy_opt_input_values):
"""Joint optimization of policy and encoder networks.
Args:
policy_opt_input_values (list(np.ndarray)): Flattened policy
optimization input values.
Returns:
float: Policy loss after optimization.
"""
logger.log('Computing loss before')
loss_before = self._optimizer.loss(policy_opt_input_values)
logger.log('Computing KL before')
policy_kl_before = self._f_policy_kl(*policy_opt_input_values)
embed_kl_before = self._f_encoder_kl(*policy_opt_input_values)
logger.log('Optimizing')
self._optimizer.optimize(policy_opt_input_values)
logger.log('Computing KL after')
policy_kl = self._f_policy_kl(*policy_opt_input_values)
embed_kl = self._f_encoder_kl(*policy_opt_input_values)
logger.log('Computing loss after')
loss_after = self._optimizer.loss(policy_opt_input_values)
tabular.record('{}/LossBefore'.format(self.policy.name), loss_before)
tabular.record('{}/LossAfter'.format(self.policy.name), loss_after)
tabular.record('{}/dLoss'.format(self.policy.name),
loss_before - loss_after)
tabular.record('{}/KLBefore'.format(self.policy.name),
policy_kl_before)
tabular.record('{}/KL'.format(self.policy.name), policy_kl)
tabular.record('{}/Encoder/KLBefore'.format(self.policy.name),
embed_kl_before)
tabular.record('{}/Encoder/KL'.format(self.policy.name), embed_kl)
return loss_after
|
Joint optimization of policy and encoder networks.
Args:
policy_opt_input_values (list(np.ndarray)): Flattened policy
optimization input values.
Returns:
float: Policy loss after optimization.
|
_train_policy_and_encoder_networks
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
def _train_inference_network(self, inference_opt_input_values):
"""Optimize inference network.
Args:
inference_opt_input_values (list(np.ndarray)): Flattened inference
optimization input values.
Returns:
float: Inference loss after optimization.
"""
logger.log('Optimizing inference network...')
infer_loss_before = self.inference_optimizer.loss(
inference_opt_input_values)
tabular.record('Inference/Loss', infer_loss_before)
self.inference_optimizer.optimize(inference_opt_input_values)
infer_loss_after = self.inference_optimizer.loss(
inference_opt_input_values)
tabular.record('Inference/dLoss', infer_loss_before - infer_loss_after)
return infer_loss_after
|
Optimize inference network.
Args:
inference_opt_input_values (list(np.ndarray)): Flattened inference
optimization input values.
Returns:
float: Inference loss after optimization.
|
_train_inference_network
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
def _get_latent_space(cls, latent_dim):
"""Get latent space given latent length.
Args:
latent_dim (int): Length of latent.
Returns:
akro.Space: Space of latent.
"""
latent_lb = np.zeros(latent_dim, )
latent_up = np.ones(latent_dim, )
return akro.Box(latent_lb, latent_up)
|
Get latent space given latent length.
Args:
latent_dim (int): Length of latent.
Returns:
akro.Space: Space of latent.
|
_get_latent_space
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
def get_encoder_spec(cls, task_space, latent_dim):
"""Get the embedding spec of the encoder.
Args:
task_space (akro.Space): Task spec.
latent_dim (int): Latent dimension.
Returns:
garage.InOutSpec: Encoder spec.
"""
latent_space = cls._get_latent_space(latent_dim)
return InOutSpec(task_space, latent_space)
|
Get the embedding spec of the encoder.
Args:
task_space (akro.Space): Task spec.
latent_dim (int): Latent dimension.
Returns:
garage.InOutSpec: Encoder spec.
|
get_encoder_spec
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
def get_infer_spec(cls, env_spec, latent_dim, inference_window_size):
"""Get the embedding spec of the inference.
Every `inference_window_size` timesteps in the trajectory will be used
as the inference network input.
Args:
env_spec (garage.envs.EnvSpec): Environment spec.
latent_dim (int): Latent dimension.
inference_window_size (int): Length of inference window.
Returns:
garage.InOutSpec: Inference spec.
"""
latent_space = cls._get_latent_space(latent_dim)
obs_lb, obs_ub = env_spec.observation_space.bounds
obs_lb_flat = env_spec.observation_space.flatten(obs_lb)
obs_ub_flat = env_spec.observation_space.flatten(obs_ub)
traj_lb = np.stack([obs_lb_flat] * inference_window_size)
traj_ub = np.stack([obs_ub_flat] * inference_window_size)
traj_space = akro.Box(traj_lb, traj_ub)
return InOutSpec(traj_space, latent_space)
|
Get the embedding spec of the inference.
Every `inference_window_size` timesteps in the trajectory will be used
as the inference network input.
Args:
env_spec (garage.envs.EnvSpec): Environment spec.
latent_dim (int): Latent dimension.
inference_window_size (int): Length of inference window.
Returns:
garage.InOutSpec: Inference spec.
|
get_infer_spec
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
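`get_infer_spec` simply tiles the flattened observation bounds over the inference window, so the inference input space has shape `(inference_window_size, obs_flat_dim)`. A shape-only NumPy sketch with made-up dimensions (the real method builds akro.Box and InOutSpec objects from an EnvSpec):

import numpy as np

obs_dim, latent_dim, window = 6, 2, 4
obs_lb_flat = -np.ones(obs_dim, dtype=np.float32)
obs_ub_flat = np.ones(obs_dim, dtype=np.float32)

traj_lb = np.stack([obs_lb_flat] * window)   # (window, obs_dim) lower bounds
traj_ub = np.stack([obs_ub_flat] * window)   # (window, obs_dim) upper bounds
assert traj_lb.shape == (window, obs_dim)
# The resulting spec maps (window, obs_dim) trajectory windows to latent_dim latents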
def __getstate__(self):
"""Parameters to save in snapshot.
Returns:
dict: Parameters to save.
"""
data = self.__dict__.copy()
del data['_name_scope']
del data['_inference_opt_inputs']
del data['_policy_opt_inputs']
del data['_f_inference_ce']
del data['_f_task_entropies']
del data['_f_encoder_entropy']
del data['_f_encoder_kl']
del data['_f_policy_entropy']
del data['_f_policy_kl']
del data['_f_rewards']
del data['_f_returns']
del data['_policy_network']
del data['_old_policy_network']
del data['_encoder_network']
del data['_old_encoder_network']
del data['_infer_network']
del data['_old_infer_network']
return data
|
Parameters to save in snapshot.
Returns:
dict: Parameters to save.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
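The `__getstate__`/`__setstate__` pair above follows the standard pattern for snapshotting objects that hold unpicklable TensorFlow artifacts: drop the compiled callables and network handles before pickling, then rebuild them on restore. A stripped-down, self-contained sketch of the pattern (class and attribute names are illustrative):

import pickle

class SnapshottableAlgo:
    def __init__(self, discount=0.99):
        self.discount = discount
        self._init_opt()

    def _init_opt(self):
        # Stand-in for building TF graphs / compiled diagnostic callables
        self._f_policy_kl = lambda *inputs: 0.0

    def __getstate__(self):
        state = self.__dict__.copy()
        del state['_f_policy_kl']      # unpicklable and cheap to rebuild
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        self._init_opt()               # rebuild the derived state

algo = SnapshottableAlgo()
restored = pickle.loads(pickle.dumps(algo))
assert restored.discount == 0.99 and callable(restored._f_policy_kl)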
def __setstate__(self, state):
"""Parameters to restore from snapshot.
Args:
state (dict): Parameters to restore from.
"""
self.__dict__ = state
self._name_scope = tf.name_scope(self._name)
self._init_opt()
|
Parameters to restore from snapshot.
Args:
state (dict): Parameters to restore from.
|
__setstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/te_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/te_npo.py
|
MIT
|
def optimize_policy(self, episodes):
"""Optimize policy.
Args:
episodes (EpisodeBatch): Batch of episodes.
"""
# Baseline predictions initialized to all zeros, used to feed the TensorFlow inputs
baselines = np.zeros((len(episodes.lengths), max(episodes.lengths)))
returns = self._fit_baseline_with_data(episodes, baselines)
baselines = self._get_baseline_prediction(episodes)
policy_opt_input_values = self._policy_opt_input_values(
episodes, baselines)
# Train policy network
logger.log('Computing loss before')
loss_before = self._optimizer.loss(policy_opt_input_values)
logger.log('Computing KL before')
policy_kl_before = self._f_policy_kl(*policy_opt_input_values)
logger.log('Optimizing')
self._optimizer.optimize(policy_opt_input_values)
logger.log('Computing KL after')
policy_kl = self._f_policy_kl(*policy_opt_input_values)
logger.log('Computing loss after')
loss_after = self._optimizer.loss(policy_opt_input_values)
tabular.record('{}/LossBefore'.format(self.policy.name), loss_before)
tabular.record('{}/LossAfter'.format(self.policy.name), loss_after)
tabular.record('{}/dLoss'.format(self.policy.name),
loss_before - loss_after)
tabular.record('{}/KLBefore'.format(self.policy.name),
policy_kl_before)
tabular.record('{}/KL'.format(self.policy.name), policy_kl)
pol_ent = self._f_policy_entropy(*policy_opt_input_values)
tabular.record('{}/Entropy'.format(self.policy.name), np.mean(pol_ent))
ev = explained_variance_1d(baselines, returns, episodes.valids)
tabular.record('{}/ExplainedVariance'.format(self._baseline.name), ev)
self._old_policy.parameters = self.policy.parameters
|
Optimize policy.
Args:
episodes (EpisodeBatch): Batch of episodes.
|
optimize_policy
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/_rl2npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/_rl2npo.py
|
MIT
|
def _get_baseline_prediction(self, episodes):
"""Get baseline prediction.
Args:
episodes (EpisodeBatch): Batch of episodes.
Returns:
np.ndarray: Baseline prediction, with shape
:math:`(N, max_episode_length * episode_per_task)`.
"""
obs = [
self._baseline.predict({'observations': obs})
for obs in episodes.observations_list
]
return pad_batch_array(np.concatenate(obs), episodes.lengths,
self.max_episode_length)
|
Get baseline prediction.
Args:
episodes (EpisodeBatch): Batch of episodes.
Returns:
np.ndarray: Baseline prediction, with shape
:math:`(N, max_episode_length * episode_per_task)`.
|
_get_baseline_prediction
|
python
|
rlworkgroup/garage
|
src/garage/tf/algos/_rl2npo.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/algos/_rl2npo.py
|
MIT
|
def fit(self, paths):
"""Fit regressor based on paths.
Args:
paths (dict[numpy.ndarray]): Sample paths.
"""
xs = np.concatenate([p['observations'] for p in paths])
if not isinstance(xs, np.ndarray) or len(xs.shape) > 2:
xs = self._env_spec.observation_space.flatten_n(xs)
ys = np.concatenate([p['returns'] for p in paths])
ys = ys.reshape((-1, 1))
if self._normalize_inputs:
# recompute normalizing constants for inputs
self._x_mean.load(np.mean(xs, axis=0, keepdims=True))
self._x_std.load(np.std(xs, axis=0, keepdims=True) + 1e-8)
inputs = [xs, ys]
loss_before = self._optimizer.loss(inputs)
tabular.record('{}/LossBefore'.format(self._name), loss_before)
self._optimizer.optimize(inputs)
loss_after = self._optimizer.loss(inputs)
tabular.record('{}/LossAfter'.format(self._name), loss_after)
tabular.record('{}/dLoss'.format(self._name), loss_before - loss_after)
|
Fit regressor based on paths.
Args:
paths (dict[numpy.ndarray]): Sample paths.
|
fit
|
python
|
rlworkgroup/garage
|
src/garage/tf/baselines/continuous_mlp_baseline.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/baselines/continuous_mlp_baseline.py
|
MIT
|
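The `_x_mean.load(...)` / `_x_std.load(...)` calls in `fit` refresh the input-normalization constants from the current batch; the statistics are just the per-feature mean and std with a small epsilon against division by zero. A NumPy sketch (helper name is illustrative):

import numpy as np

def input_normalizer_stats(xs, eps=1e-8):
    # Per-feature mean and (std + eps), keeping a leading broadcast dimension
    x_mean = np.mean(xs, axis=0, keepdims=True)
    x_std = np.std(xs, axis=0, keepdims=True) + eps
    return x_mean, x_std

xs = np.random.randn(128, 11).astype(np.float32)    # (samples, obs_dim)
x_mean, x_std = input_normalizer_stats(xs)
normalized_xs = (xs - x_mean) / x_std                # what the network consumes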
def predict(self, paths):
"""Predict value based on paths.
Args:
paths (dict[numpy.ndarray]): Sample paths.
Returns:
numpy.ndarray: Predicted value.
"""
obs = paths['observations']
if not isinstance(obs, np.ndarray) or len(obs.shape) > 2:
obs = self._env_spec.observation_space.flatten_n(obs)
return self._f_predict(obs).flatten()
|
Predict value based on paths.
Args:
paths (dict[numpy.ndarray]): Sample paths.
Returns:
numpy.ndarray: Predicted value.
|
predict
|
python
|
rlworkgroup/garage
|
src/garage/tf/baselines/continuous_mlp_baseline.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/baselines/continuous_mlp_baseline.py
|
MIT
|
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_predict']
del new_dict['_x_mean']
del new_dict['_x_std']
del new_dict['_y_hat']
return new_dict
|
Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/baselines/continuous_mlp_baseline.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/baselines/continuous_mlp_baseline.py
|
MIT
|
def fit(self, paths):
"""Fit regressor based on paths.
Args:
paths (dict[numpy.ndarray]): Sample paths.
"""
xs = np.concatenate([p['observations'] for p in paths])
if isinstance(self._env_spec.observation_space, akro.Image) and \
len(xs[0].shape) < \
len(self._env_spec.observation_space.shape):
xs = self._env_spec.observation_space.unflatten_n(xs)
ys = np.concatenate([p['returns'] for p in paths])
ys = ys.reshape((-1, 1))
if self._subsample_factor < 1:
num_samples_tot = xs.shape[0]
idx = np.random.randint(
0, num_samples_tot,
int(num_samples_tot * self._subsample_factor))
xs, ys = xs[idx], ys[idx]
if self._normalize_inputs:
# recompute normalizing constants for inputs
self._x_mean.load(np.mean(xs, axis=0, keepdims=True))
self._x_std.load(np.std(xs, axis=0, keepdims=True) + 1e-8)
self._old_network.x_mean.load(np.mean(xs, axis=0, keepdims=True))
self._old_network.x_std.load(
np.std(xs, axis=0, keepdims=True) + 1e-8)
if self._normalize_outputs:
# recompute normalizing constants for outputs
self._y_mean.load(np.mean(ys, axis=0, keepdims=True))
self._y_std.load(np.std(ys, axis=0, keepdims=True) + 1e-8)
self._old_network.y_mean.load(np.mean(ys, axis=0, keepdims=True))
self._old_network.y_std.load(
np.std(ys, axis=0, keepdims=True) + 1e-8)
inputs = [xs, ys]
loss_before = self._optimizer.loss(inputs)
tabular.record('{}/LossBefore'.format(self._name), loss_before)
self._optimizer.optimize(inputs)
loss_after = self._optimizer.loss(inputs)
tabular.record('{}/LossAfter'.format(self._name), loss_after)
if self._use_trust_region:
tabular.record('{}/MeanKL'.format(self._name),
self._optimizer.constraint_val(inputs))
tabular.record('{}/dLoss'.format(self._name), loss_before - loss_after)
self._old_model.parameters = self.parameters
|
Fit regressor based on paths.
Args:
paths (dict[numpy.ndarray]): Sample paths.
|
fit
|
python
|
rlworkgroup/garage
|
src/garage/tf/baselines/gaussian_cnn_baseline.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/baselines/gaussian_cnn_baseline.py
|
MIT
|
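When `_subsample_factor < 1`, the regressor above is fit on a random subset of the collected (x, y) pairs; note that `np.random.randint` samples indices with replacement. A sketch of that step in isolation (helper name is illustrative):

import numpy as np

def subsample_pairs(xs, ys, subsample_factor):
    # Randomly subsample training pairs (with replacement, as in the snippet above)
    if subsample_factor >= 1:
        return xs, ys
    num_samples_tot = xs.shape[0]
    idx = np.random.randint(0, num_samples_tot,
                            int(num_samples_tot * subsample_factor))
    return xs[idx], ys[idx]

xs = np.random.randn(1000, 8)
ys = np.random.randn(1000, 1)
xs_sub, ys_sub = subsample_pairs(xs, ys, subsample_factor=0.5)
assert xs_sub.shape[0] == 500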
def predict(self, paths):
"""Predict ys based on input xs.
Args:
paths (dict[numpy.ndarray]): Sample paths.
Return:
numpy.ndarray: The predicted ys.
"""
xs = paths['observations']
if isinstance(self._env_spec.observation_space, akro.Image) and \
len(xs[0].shape) < \
len(self._env_spec.observation_space.shape):
xs = self._env_spec.observation_space.unflatten_n(xs)
return self._f_predict(xs).flatten()
|
Predict ys based on input xs.
Args:
paths (dict[numpy.ndarray]): Sample paths.
Return:
numpy.ndarray: The predicted ys.
|
predict
|
python
|
rlworkgroup/garage
|
src/garage/tf/baselines/gaussian_cnn_baseline.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/baselines/gaussian_cnn_baseline.py
|
MIT
|
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_predict']
del new_dict['_old_network']
del new_dict['_x_mean']
del new_dict['_x_std']
del new_dict['_y_mean']
del new_dict['_y_std']
return new_dict
|
Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/baselines/gaussian_cnn_baseline.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/baselines/gaussian_cnn_baseline.py
|
MIT
|
def network_output_spec(self):
"""Network output spec.
Return:
list[str]: List of key(str) for the network outputs.
"""
return [
'sample', 'std_param', 'normalized_dist', 'normalized_mean',
'normalized_log_std', 'dist', 'mean', 'log_std', 'x_mean', 'x_std',
'y_mean', 'y_std'
]
|
Network output spec.
Return:
list[str]: List of key(str) for the network outputs.
|
network_output_spec
|
python
|
rlworkgroup/garage
|
src/garage/tf/baselines/gaussian_cnn_baseline_model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/baselines/gaussian_cnn_baseline_model.py
|
MIT
|
def _build(self, state_input, name=None):
"""Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Placeholder for state input.
name (str): Inner model name, also the variable scope of the
inner model, if it exists. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Sampled action.
tf.Tensor: Parameterized log_std.
tfp.distributions.MultivariateNormalDiag: Normalized distribution.
tf.Tensor: Normalized mean.
tf.Tensor: Normalized log_std.
tfp.distributions.MultivariateNormalDiag: Vanilla distribution.
tf.Tensor: Vanilla mean.
tf.Tensor: Vanilla log_std.
tf.Tensor: Mean for data.
tf.Tensor: log_std for data.
tf.Tensor: Mean for label.
tf.Tensor: log_std for label.
"""
with tf.compat.v1.variable_scope('normalized_vars'):
x_mean_var = tf.compat.v1.get_variable(
name='x_mean',
shape=(1, ) + self._input_shape,
dtype=np.float32,
initializer=tf.zeros_initializer(),
trainable=False)
x_std_var = tf.compat.v1.get_variable(
name='x_std_var',
shape=(1, ) + self._input_shape,
dtype=np.float32,
initializer=tf.ones_initializer(),
trainable=False)
y_mean_var = tf.compat.v1.get_variable(
name='y_mean_var',
shape=(1, self._output_dim),
dtype=np.float32,
initializer=tf.zeros_initializer(),
trainable=False)
y_std_var = tf.compat.v1.get_variable(
name='y_std_var',
shape=(1, self._output_dim),
dtype=np.float32,
initializer=tf.ones_initializer(),
trainable=False)
normalized_xs_var = (state_input - x_mean_var) / x_std_var
(sample, normalized_dist_mean, normalized_dist_log_std, std_param,
_) = super()._build(normalized_xs_var)
with tf.name_scope('mean_network'):
means_var = normalized_dist_mean * y_std_var + y_mean_var
with tf.name_scope('std_network'):
log_stds_var = normalized_dist_log_std + tf.math.log(y_std_var)
normalized_dist = tfp.distributions.MultivariateNormalDiag(
loc=normalized_dist_mean,
scale_diag=tf.exp(normalized_dist_log_std))
vanilla_dist = tfp.distributions.MultivariateNormalDiag(
loc=means_var, scale_diag=tf.exp(log_stds_var))
return (sample, std_param, normalized_dist, normalized_dist_mean,
normalized_dist_log_std, vanilla_dist, means_var, log_stds_var,
x_mean_var, x_std_var, y_mean_var, y_std_var)
|
Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Placeholder for state input.
name (str): Inner model name, also the variable scope of the
inner model, if it exists. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Sampled action.
tf.Tensor: Parameterized log_std.
tfp.distributions.MultivariateNormalDiag: Normalized distribution.
tf.Tensor: Normalized mean.
tf.Tensor: Normalized log_std.
tfp.distributions.MultivariateNormalDiag: Vanilla distribution.
tf.Tensor: Vanilla mean.
tf.Tensor: Vanilla log_std.
tf.Tensor: Mean for data.
tf.Tensor: log_std for data.
tf.Tensor: Mean for label.
tf.Tensor: log_std for label.
|
_build
|
python
|
rlworkgroup/garage
|
src/garage/tf/baselines/gaussian_cnn_baseline_model.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/baselines/gaussian_cnn_baseline_model.py
|
MIT
|
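The mean and std networks in `_build` map the normalized fit back to label scale: the mean is rescaled by y_std and shifted by y_mean, while the log-std just gains log(y_std). A NumPy sketch of that denormalization (names are illustrative):

import numpy as np

def denormalize_gaussian(normalized_mean, normalized_log_std, y_mean, y_std):
    # Map a Gaussian fit in normalized label space back to the original scale
    mean = normalized_mean * y_std + y_mean
    log_std = normalized_log_std + np.log(y_std)
    return mean, log_std

y_mean = np.array([[10.0]])
y_std = np.array([[2.0]])
mean, log_std = denormalize_gaussian(np.array([[0.5]]), np.array([[-1.0]]),
                                      y_mean, y_std)
# mean == 11.0, std == 2 * exp(-1): the same density the normalized fit described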