code stringlengths 66-870k | docstring stringlengths 19-26.7k | func_name stringlengths 1-138 | language stringclasses 1 value | repo stringlengths 7-68 | path stringlengths 5-324 | url stringlengths 46-389 | license stringclasses 7 values |
---|---|---|---|---|---|---|---|
def __getitem__(self, key):
"""See :meth:`object.__getitem__`.
Args:
key (Hashable): Key associated with the value to retrieve.
Returns:
object: Lazily-evaluated value of the :class:`Callable` associated
with key.
"""
if key not in self._dict:
self._dict[key] = self._lazy_dict[key]()
return self._dict[key]
|
See :meth:`object.__getitem__`.
Args:
key (Hashable): Key associated with the value to retrieve.
Returns:
object: Lazily-evaluated value of the :class:`Callable` associated
with key.
|
__getitem__
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/_dtypes.py
|
MIT
|
def get(self, key, default=None):
"""See :meth:`dict.get`.
Args:
key (Hashable): Key associated with the value to retrieve.
default (object): Value to return if key is not present in this
:class:`LazyDict`.
Returns:
object: Value associated with key if the key is present, otherwise
default.
"""
if key in self._lazy_dict:
return self[key]
return default
|
See :meth:`dict.get`.
Args:
key (Hashable): Key associated with the value to retrieve.
default (object): Value to return if key is not present in this
:class:`LazyDict`.
Returns:
object: Value associated with key if the key is present, otherwise
default.
|
get
|
python
|
rlworkgroup/garage
|
src/garage/tf/optimizers/_dtypes.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/optimizers/_dtypes.py
|
MIT
|
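The two `LazyDict` methods above only make sense alongside the container they live in. Below is a minimal, self-contained sketch of such a lazily evaluated dictionary; the attribute names mirror `_lazy_dict`/`_dict` from the snippets, but the constructor and usage shown here are assumptions, not the garage implementation.

```python
class LazyDict:
    """Minimal sketch: values are zero-argument callables, evaluated on first access."""

    def __init__(self, **lazy_values):
        self._lazy_dict = lazy_values  # key -> zero-argument callable
        self._dict = {}                # key -> cached evaluated value

    def __getitem__(self, key):
        if key not in self._dict:
            self._dict[key] = self._lazy_dict[key]()
        return self._dict[key]

    def get(self, key, default=None):
        if key in self._lazy_dict:
            return self[key]
        return default


# Usage: the callable runs once, on first access only.
d = LazyDict(answer=lambda: sum(range(10)))
assert d.get('missing', 'fallback') == 'fallback'
assert d['answer'] == 45      # evaluated here
assert d['answer'] == 45      # served from the cache
```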
def update_plot(self, policy, max_length=np.inf):
"""Update the policy being plotted.
Args:
policy (garage.tf.Policy): Policy to visualize.
max_length (int or float): The maximum length to allow an episode
to be. Defaults to infinity.
"""
if self.worker_thread.is_alive():
self.queue.put(
Message(op=Op.DEMO,
args=(policy.get_param_values(), max_length),
kwargs=None))
|
Update the policy being plotted.
Args:
policy (garage.tf.Policy): Policy to visualize.
max_length (int or float): The maximum length to allow an episode
to be. Defaults to infinity.
|
update_plot
|
python
|
rlworkgroup/garage
|
src/garage/tf/plotter/plotter.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/plotter/plotter.py
|
MIT
|
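`update_plot` only enqueues a `Message(op=Op.DEMO, ...)` for a worker thread to consume; the plotting itself happens elsewhere. The stdlib sketch below illustrates that producer/consumer shape with a generic payload; the `Message` namedtuple and the worker loop here are stand-ins, not the garage plotter.

```python
import queue
import threading
from collections import namedtuple

Message = namedtuple('Message', ['op', 'args', 'kwargs'])

work_queue = queue.Queue()

def worker():
    # Consume messages until a None sentinel arrives.
    while True:
        msg = work_queue.get()
        if msg is None:
            break
        print(f'handling op={msg.op} args={msg.args}')
        work_queue.task_done()

thread = threading.Thread(target=worker, daemon=True)
thread.start()

# Producer side, analogous to update_plot's queue.put(...).
if thread.is_alive():
    work_queue.put(Message(op='DEMO', args=('param_values', 100), kwargs=None))

work_queue.join()       # wait until the message has been handled
work_queue.put(None)    # tell the worker to exit
thread.join()
```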
def get_action(self, observation):
"""Return a single action.
Args:
observation (numpy.ndarray): Observations.
Returns:
int: Action given input observation.
dict(numpy.ndarray): Distribution parameters.
"""
sample, prob = self.get_actions([observation])
return sample, {k: v[0] for k, v in prob.items()}
|
Return a single action.
Args:
observation (numpy.ndarray): Observations.
Returns:
int: Action given input observation.
dict(numpy.ndarray): Distribution parameters.
|
get_action
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/categorical_cnn_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/categorical_cnn_policy.py
|
MIT
|
def get_actions(self, observations):
"""Return multiple actions.
Args:
observations (numpy.ndarray): Observations.
Returns:
list[int]: Actions given input observations.
dict(numpy.ndarray): Distribution parameters.
"""
if not isinstance(observations[0],
np.ndarray) or len(observations[0].shape) > 1:
observations = self.observation_space.flatten_n(observations)
samples, probs = self._f_prob(np.expand_dims(observations, 1))
return np.squeeze(samples), dict(prob=np.squeeze(probs, axis=1))
|
Return multiple actions.
Args:
observations (numpy.ndarray): Observations.
Returns:
list[int]: Actions given input observations.
dict(numpy.ndarray): Distribution parameters.
|
get_actions
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/categorical_cnn_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/categorical_cnn_policy.py
|
MIT
|
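The `get_actions` implementations in these policies all normalize input the same way: flatten each observation if needed, add a time axis of length 1 before calling the compiled function, and squeeze that axis back out of the outputs. A NumPy-only illustration of the reshape round trip, with a dummy stand-in for the compiled `_f_prob`:

```python
import numpy as np

# A batch of 4 image observations, each 2x2x3, flattened to (4, 12)
# the way observation_space.flatten_n would produce them.
observations = np.random.rand(4, 2, 2, 3).reshape(4, -1)

# Add a time axis of length 1: shape (4, 1, 12), matching np.expand_dims(obs, 1).
batched = np.expand_dims(observations, 1)

def fake_f_prob(x):
    # Stand-in for the compiled policy function: per-step action
    # samples and probabilities with the same leading axes.
    n, t, _ = x.shape
    samples = np.zeros((n, t), dtype=int)
    probs = np.full((n, t, 3), 1.0 / 3.0)
    return samples, probs

samples, probs = fake_f_prob(batched)

# Remove the time axis again, as in the return statement above.
actions = np.squeeze(samples)                        # shape (4,)
agent_info = dict(prob=np.squeeze(probs, axis=1))    # shape (4, 3)
print(actions.shape, agent_info['prob'].shape)
```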
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_prob']
return new_dict
|
Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/categorical_cnn_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/categorical_cnn_policy.py
|
MIT
|
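The `__getstate__` overrides throughout this file follow one pattern: drop compiled, session-bound TF callables (here `_f_prob`) from the pickled state, because they cannot be serialized, and rebuild them after unpickling. The self-contained sketch below shows that pattern with a plain lambda standing in for the compiled function; the `_rebuild` step is an assumed illustration of how such a field would be restored, not garage's `__setstate__`.

```python
import pickle


class ProbModel:
    """Sketch of the pickle pattern: drop an unpicklable callable, rebuild it later."""

    def __init__(self, scale):
        self.scale = scale
        self._rebuild()

    def _rebuild(self):
        # Lambdas (like session-bound TF callables) cannot be pickled.
        self._f_prob = lambda x: x * self.scale

    def __getstate__(self):
        state = self.__dict__.copy()
        del state['_f_prob']          # mirrors: del new_dict['_f_prob']
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self._rebuild()               # recreate what was dropped


model = ProbModel(scale=2.0)
clone = pickle.loads(pickle.dumps(model))
assert clone._f_prob(3.0) == 6.0
```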
def build(self, state_input, name=None):
"""Build policy.
Args:
state_input (tf.Tensor): State input.
name (str): Name of the policy, which is also the name scope.
Returns:
tfp.distributions.OneHotCategorical: Policy distribution.
tf.Tensor: Step output, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`.
tf.Tensor: Initial hidden state, used to reset the hidden state
when policy resets. Shape: :math:`(S^*)`.
"""
_, step_input_var, step_hidden_var = self.inputs
return super().build(state_input,
step_input_var,
step_hidden_var,
name=name)
|
Build policy.
Args:
state_input (tf.Tensor): State input.
name (str): Name of the policy, which is also the name scope.
Returns:
tfp.distributions.OneHotCategorical: Policy distribution.
tf.Tensor: Step output, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`.
tf.Tensor: Initial hidden state, used to reset the hidden state
when policy resets. Shape: :math:`(S^*)`.
|
build
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/categorical_gru_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/categorical_gru_policy.py
|
MIT
|
def reset(self, do_resets=None):
"""Reset the policy.
Note:
If `do_resets` is None, it will be by default np.array([True]),
which implies the policy will not be "vectorized", i.e. number of
parallel environments for training data sampling = 1.
Args:
do_resets (numpy.ndarray): Bool that indicates terminal state(s).
"""
if do_resets is None:
do_resets = [True]
do_resets = np.asarray(do_resets)
if self._prev_actions is None or len(do_resets) != len(
self._prev_actions):
self._prev_actions = np.zeros(
(len(do_resets), self.action_space.flat_dim))
self._prev_hiddens = np.zeros((len(do_resets), self._hidden_dim))
self._prev_actions[do_resets] = 0.
self._prev_hiddens[do_resets] = self._init_hidden.eval()
|
Reset the policy.
Note:
If `do_resets` is None, it will be by default np.array([True]),
which implies the policy will not be "vectorized", i.e. number of
parallel environments for training data sampling = 1.
Args:
do_resets (numpy.ndarray): Bool that indicates terminal state(s).
|
reset
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/categorical_gru_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/categorical_gru_policy.py
|
MIT
|
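`reset` keeps one row of recurrent state per parallel environment and uses the boolean `do_resets` mask to zero the previous actions and restore the initial hidden state only for the environments that just finished. A NumPy sketch of that masked reset, with an assumed initial hidden vector in place of `self._init_hidden.eval()`:

```python
import numpy as np

n_envs, action_dim, hidden_dim = 3, 2, 4
init_hidden = np.full(hidden_dim, 0.5)   # stand-in for _init_hidden.eval()

prev_actions = np.random.rand(n_envs, action_dim)
prev_hiddens = np.random.rand(n_envs, hidden_dim)

# Only environments 0 and 2 terminated; environment 1 keeps its state.
do_resets = np.array([True, False, True])

prev_actions[do_resets] = 0.
prev_hiddens[do_resets] = init_hidden

print(prev_actions)
print(prev_hiddens)
```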
def get_action(self, observation):
"""Return a single action.
Args:
observation (numpy.ndarray): Observations.
Returns:
int: Action given input observation.
dict(numpy.ndarray): Distribution parameters.
"""
actions, agent_infos = self.get_actions([observation])
return actions[0], {k: v[0] for k, v in agent_infos.items()}
|
Return a single action.
Args:
observation (numpy.ndarray): Observations.
Returns:
int: Action given input observation.
dict(numpy.ndarray): Distribution parameters.
|
get_action
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/categorical_gru_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/categorical_gru_policy.py
|
MIT
|
def get_actions(self, observations):
"""Return multiple actions.
Args:
observations (numpy.ndarray): Observations.
Returns:
list[int]: Actions given input observations.
dict(numpy.ndarray): Distribution parameters.
"""
if not isinstance(observations[0],
np.ndarray) or len(observations[0].shape) > 1:
observations = self.observation_space.flatten_n(observations)
if self._state_include_action:
assert self._prev_actions is not None
all_input = np.concatenate([observations, self._prev_actions],
axis=-1)
else:
all_input = observations
probs, hidden_vec = self._f_step_prob(all_input, self._prev_hiddens)
actions = list(map(self.action_space.weighted_sample, probs))
prev_actions = self._prev_actions
self._prev_actions = self.action_space.flatten_n(actions)
self._prev_hiddens = hidden_vec
agent_info = dict(prob=probs)
if self._state_include_action:
agent_info['prev_action'] = np.copy(prev_actions)
return actions, agent_info
|
Return multiple actions.
Args:
observations (numpy.ndarray): Observations.
Returns:
list[int]: Actions given input observations.
dict(numpy.ndarray): Distribution parameters.
|
get_actions
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/categorical_gru_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/categorical_gru_policy.py
|
MIT
|
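`get_actions` above draws one discrete action per environment with `self.action_space.weighted_sample(probs)`. A NumPy-only equivalent of that per-row categorical draw, using hypothetical probabilities rather than real policy output:

```python
import numpy as np

rng = np.random.default_rng(0)

# One probability vector per (vectorized) environment; rows sum to 1.
probs = np.array([[0.1, 0.7, 0.2],
                  [0.5, 0.25, 0.25]])

def weighted_sample(p):
    # Draw an action index with probability proportional to p.
    return rng.choice(len(p), p=p)

actions = [weighted_sample(p) for p in probs]
print(actions)          # e.g. [1, 0]

# The agent_info dict mirrors what the policy returns alongside the actions.
agent_info = dict(prob=probs)
```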
def state_info_specs(self):
"""State info specifcation.
Returns:
List[str]: keys and shapes for the information related to the
policy's state when taking an action.
"""
if self._state_include_action:
return [
('prev_action', (self._action_dim, )),
]
return []
|
State info specification.
Returns:
List[str]: keys and shapes for the information related to the
policy's state when taking an action.
|
state_info_specs
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/categorical_gru_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/categorical_gru_policy.py
|
MIT
|
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_step_prob']
del new_dict['_init_hidden']
return new_dict
|
Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/categorical_gru_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/categorical_gru_policy.py
|
MIT
|
def build(self, state_input, name=None):
"""Build policy.
Args:
state_input (tf.Tensor): State input.
name (str): Name of the policy, which is also the name scope.
Returns:
tfp.distributions.OneHotCategorical: Policy distribution.
tf.Tensor: Step output, with shape :math:`(N, S^*)`
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`
tf.Tensor: Step cell state, with shape :math:`(N, S^*)`
tf.Tensor: Initial hidden state, used to reset the hidden state
when policy resets. Shape: :math:`(S^*)`
tf.Tensor: Initial cell state, used to reset the cell state
when policy resets. Shape: :math:`(S^*)`
"""
_, step_input, step_hidden, step_cell = self.inputs
return super().build(state_input,
step_input,
step_hidden,
step_cell,
name=name)
|
Build policy.
Args:
state_input (tf.Tensor): State input.
name (str): Name of the policy, which is also the name scope.
Returns:
tfp.distributions.OneHotCategorical: Policy distribution.
tf.Tensor: Step output, with shape :math:`(N, S^*)`
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`
tf.Tensor: Step cell state, with shape :math:`(N, S^*)`
tf.Tensor: Initial hidden state, used to reset the hidden state
when policy resets. Shape: :math:`(S^*)`
tf.Tensor: Initial cell state, used to reset the cell state
when policy resets. Shape: :math:`(S^*)`
|
build
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/categorical_lstm_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/categorical_lstm_policy.py
|
MIT
|
def reset(self, do_resets=None):
"""Reset the policy.
Note:
If `do_resets` is None, it will be by default np.array([True]),
which implies the policy will not be "vectorized", i.e. number of
parallel environments for training data sampling = 1.
Args:
do_resets (numpy.ndarray): Bool that indicates terminal state(s).
"""
if do_resets is None:
do_resets = [True]
do_resets = np.asarray(do_resets)
if self._prev_actions is None or len(do_resets) != len(
self._prev_actions):
self._prev_actions = np.zeros(
(len(do_resets), self.action_space.flat_dim))
self._prev_hiddens = np.zeros((len(do_resets), self._hidden_dim))
self._prev_cells = np.zeros((len(do_resets), self._hidden_dim))
self._prev_actions[do_resets] = 0.
self._prev_hiddens[do_resets] = self._init_hidden.eval()
self._prev_cells[do_resets] = self._init_cell.eval()
|
Reset the policy.
Note:
If `do_resets` is None, it will be by default np.array([True]),
which implies the policy will not be "vectorized", i.e. number of
parallel environments for training data sampling = 1.
Args:
do_resets (numpy.ndarray): Bool that indicates terminal state(s).
|
reset
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/categorical_lstm_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/categorical_lstm_policy.py
|
MIT
|
def get_action(self, observation):
"""Return a single action.
Args:
observation (numpy.ndarray): Observations.
Returns:
int: Action given input observation.
dict(numpy.ndarray): Distribution parameters.
"""
actions, agent_infos = self.get_actions([observation])
return actions[0], {k: v[0] for k, v in agent_infos.items()}
|
Return a single action.
Args:
observation (numpy.ndarray): Observations.
Returns:
int: Action given input observation.
dict(numpy.ndarray): Distribution parameters.
|
get_action
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/categorical_lstm_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/categorical_lstm_policy.py
|
MIT
|
def get_actions(self, observations):
"""Return multiple actions.
Args:
observations (numpy.ndarray): Observations.
Returns:
list[int]: Actions given input observations.
dict(numpy.ndarray): Distribution parameters.
"""
if not isinstance(observations[0],
np.ndarray) or len(observations[0].shape) > 1:
observations = self.observation_space.flatten_n(observations)
if self._state_include_action:
assert self._prev_actions is not None
all_input = np.concatenate([observations, self._prev_actions],
axis=-1)
else:
all_input = observations
probs, hidden_vec, cell_vec = self._f_step_prob(
all_input, self._prev_hiddens, self._prev_cells)
actions = list(map(self.action_space.weighted_sample, probs))
prev_actions = self._prev_actions
self._prev_actions = self.action_space.flatten_n(actions)
self._prev_hiddens = hidden_vec
self._prev_cells = cell_vec
agent_info = dict(prob=probs)
if self._state_include_action:
agent_info['prev_action'] = np.copy(prev_actions)
return actions, agent_info
|
Return multiple actions.
Args:
observations (numpy.ndarray): Observations.
Returns:
list[int]: Actions given input observations.
dict(numpy.ndarray): Distribution parameters.
|
get_actions
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/categorical_lstm_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/categorical_lstm_policy.py
|
MIT
|
def state_info_specs(self):
"""State info specifcation.
Returns:
List[str]: keys and shapes for the information related to the
policy's state when taking an action.
"""
if self._state_include_action:
return [
('prev_action', (self._action_dim, )),
]
return []
|
State info specification.
Returns:
List[str]: keys and shapes for the information related to the
policy's state when taking an action.
|
state_info_specs
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/categorical_lstm_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/categorical_lstm_policy.py
|
MIT
|
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_step_prob']
del new_dict['_init_hidden']
del new_dict['_init_cell']
return new_dict
|
Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/categorical_lstm_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/categorical_lstm_policy.py
|
MIT
|
def get_action(self, observation):
"""Return a single action.
Args:
observation (numpy.ndarray): Observations.
Returns:
int: Action given input observation.
dict(numpy.ndarray): Distribution parameters.
"""
actions, agent_infos = self.get_actions([observation])
return actions, {k: v[0] for k, v in agent_infos.items()}
|
Return a single action.
Args:
observation (numpy.ndarray): Observations.
Returns:
int: Action given input observation.
dict(numpy.ndarray): Distribution parameters.
|
get_action
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/categorical_mlp_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/categorical_mlp_policy.py
|
MIT
|
def get_actions(self, observations):
"""Return multiple actions.
Args:
observations (numpy.ndarray): Observations.
Returns:
list[int]: Actions given input observations.
dict(numpy.ndarray): Distribution parameters.
"""
if not isinstance(observations[0],
np.ndarray) or len(observations[0].shape) > 1:
observations = self.observation_space.flatten_n(observations)
samples, probs = self._f_prob(np.expand_dims(observations, 1))
return np.squeeze(samples), dict(prob=np.squeeze(probs, axis=1))
|
Return multiple actions.
Args:
observations (numpy.ndarray): Observations.
Returns:
list[int]: Actions given input observations.
dict(numpy.ndarray): Distribution parameters.
|
get_actions
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/categorical_mlp_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/categorical_mlp_policy.py
|
MIT
|
def get_regularizable_vars(self):
"""Get regularizable weight variables under the Policy scope.
Returns:
list[tf.Tensor]: Trainable variables.
"""
trainable = self.get_trainable_vars()
return [
var for var in trainable
if 'hidden' in var.name and 'kernel' in var.name
]
|
Get regularizable weight variables under the Policy scope.
Returns:
list[tf.Tensor]: Trainable variables.
|
get_regularizable_vars
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/categorical_mlp_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/categorical_mlp_policy.py
|
MIT
|
def get_action(self, observation):
"""Get single action from this policy for the input observation.
Args:
observation (numpy.ndarray): Observation from environment.
Returns:
numpy.ndarray: Predicted action.
dict: Empty dict since this policy does not model a distribution.
"""
actions, agent_infos = self.get_actions([observation])
action = actions[0]
return action, {k: v[0] for k, v in agent_infos.items()}
|
Get single action from this policy for the input observation.
Args:
observation (numpy.ndarray): Observation from environment.
Returns:
numpy.ndarray: Predicted action.
dict: Empty dict since this policy does not model a distribution.
|
get_action
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/continuous_mlp_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/continuous_mlp_policy.py
|
MIT
|
def get_actions(self, observations):
"""Get multiple actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Predicted actions.
dict: Empty dict since this policy does not model a distribution.
"""
if not isinstance(observations[0],
np.ndarray) or len(observations[0].shape) > 1:
observations = self.observation_space.flatten_n(observations)
actions = self._f_prob(observations)
actions = self.action_space.unflatten_n(actions)
return actions, dict()
|
Get multiple actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Predicted actions.
dict: Empty dict since this policy does not model a distribution.
|
get_actions
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/continuous_mlp_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/continuous_mlp_policy.py
|
MIT
|
def get_regularizable_vars(self):
"""Get regularizable weight variables under the Policy scope.
Returns:
list(tf.Variable): List of regularizable variables.
"""
trainable = self.get_trainable_vars()
return [
var for var in trainable
if 'hidden' in var.name and 'kernel' in var.name
]
|
Get regularizable weight variables under the Policy scope.
Returns:
list(tf.Variable): List of regularizable variables.
|
get_regularizable_vars
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/continuous_mlp_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/continuous_mlp_policy.py
|
MIT
|
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled as the contents for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_prob']
return new_dict
|
Object.__getstate__.
Returns:
dict: the state to be pickled as the contents for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/continuous_mlp_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/continuous_mlp_policy.py
|
MIT
|
def get_action(self, observation):
"""Get action from this policy for the input observation.
Args:
observation (numpy.ndarray): Observation from environment.
Returns:
numpy.ndarray: Single optimal action from this policy.
dict: Predicted action and agent information. It returns an empty
dict since there is no parameterization.
"""
opt_actions, agent_infos = self.get_actions([observation])
return opt_actions[0], {k: v[0] for k, v in agent_infos.items()}
|
Get action from this policy for the input observation.
Args:
observation (numpy.ndarray): Observation from environment.
Returns:
numpy.ndarray: Single optimal action from this policy.
dict: Predicted action and agent information. It returns an empty
dict since there is no parameterization.
|
get_action
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/discrete_qf_argmax_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/discrete_qf_argmax_policy.py
|
MIT
|
def get_actions(self, observations):
"""Get actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Optimal actions from this policy.
dict: Predicted action and agent information. It returns an empty
dict since there is no parameterization.
"""
if isinstance(self.env_spec.observation_space, akro.Image) and \
len(observations[0].shape) < \
len(self.env_spec.observation_space.shape):
observations = self.env_spec.observation_space.unflatten_n(
observations)
q_vals = self._f_qval(observations)
opt_actions = np.argmax(q_vals, axis=1)
return opt_actions, dict()
|
Get actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Optimal actions from this policy.
dict: Predicted action and agent information. It returns an empty
dict since there is no parameterization.
|
get_actions
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/discrete_qf_argmax_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/discrete_qf_argmax_policy.py
|
MIT
|
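The argmax policy above simply takes the greedy action per row of the Q-value matrix returned by the compiled Q-function. A tiny NumPy example with made-up Q-values:

```python
import numpy as np

# Q-values for 3 observations over 4 discrete actions (made-up numbers).
q_vals = np.array([[0.1, 0.9, 0.3, 0.2],
                   [1.5, 0.2, 0.1, 0.0],
                   [0.0, 0.0, 2.0, 1.0]])

# Greedy action per observation, as in np.argmax(q_vals, axis=1).
opt_actions = np.argmax(q_vals, axis=1)
print(opt_actions)   # [1 0 2]
```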
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_qval']
return new_dict
|
Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/discrete_qf_argmax_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/discrete_qf_argmax_policy.py
|
MIT
|
def build(self, state_input, name=None):
"""Build policy.
Args:
state_input (tf.Tensor): State input.
name (str): Name of the policy, which is also the name scope.
Returns:
tfp.distributions.MultivariateNormalDiag: Policy distribution.
tf.Tensor: Step means, with shape :math:`(N, S^*)`.
tf.Tensor: Step log std, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`.
tf.Tensor: Initial hidden state, with shape :math:`(S^*)`.
"""
_, step_input_var, step_hidden_var = self.inputs
return super().build(state_input,
step_input_var,
step_hidden_var,
name=name)
|
Build policy.
Args:
state_input (tf.Tensor): State input.
name (str): Name of the policy, which is also the name scope.
Returns:
tfp.distributions.MultivariateNormalDiag: Policy distribution.
tf.Tensor: Step means, with shape :math:`(N, S^*)`.
tf.Tensor: Step log std, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`.
tf.Tensor: Initial hidden state, with shape :math:`(S^*)`.
|
build
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_gru_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_gru_policy.py
|
MIT
|
def reset(self, do_resets=None):
"""Reset the policy.
Note:
If `do_resets` is None, it will be by default `np.array([True])`
which implies the policy will not be "vectorized", i.e. number of
parallel environments for training data sampling = 1.
Args:
do_resets (numpy.ndarray): Bool that indicates terminal state(s).
"""
if do_resets is None:
do_resets = np.array([True])
if self._prev_actions is None or len(do_resets) != len(
self._prev_actions):
self._prev_actions = np.zeros(
(len(do_resets), self.action_space.flat_dim))
self._prev_hiddens = np.zeros((len(do_resets), self._hidden_dim))
self._prev_actions[do_resets] = 0.
self._prev_hiddens[do_resets] = self._init_hidden.eval()
|
Reset the policy.
Note:
If `do_resets` is None, it will be by default `np.array([True])`
which implies the policy will not be "vectorized", i.e. number of
parallel environments for training data sampling = 1.
Args:
do_resets (numpy.ndarray): Bool that indicates terminal state(s).
|
reset
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_gru_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_gru_policy.py
|
MIT
|
def get_action(self, observation):
"""Get single action from this policy for the input observation.
Args:
observation (numpy.ndarray): Observation from environment.
Returns:
numpy.ndarray: Actions
dict: Predicted action and agent information.
Note:
It returns an action and a dict, with keys
- mean (numpy.ndarray): Mean of the distribution.
- log_std (numpy.ndarray): Log standard deviation of the
distribution.
- prev_action (numpy.ndarray): Previous action, only present if
self._state_include_action is True.
"""
actions, agent_infos = self.get_actions([observation])
return actions[0], {k: v[0] for k, v in agent_infos.items()}
|
Get single action from this policy for the input observation.
Args:
observation (numpy.ndarray): Observation from environment.
Returns:
numpy.ndarray: Actions
dict: Predicted action and agent information.
Note:
It returns an action and a dict, with keys
- mean (numpy.ndarray): Mean of the distribution.
- log_std (numpy.ndarray): Log standard deviation of the
distribution.
- prev_action (numpy.ndarray): Previous action, only present if
self._state_include_action is True.
|
get_action
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_gru_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_gru_policy.py
|
MIT
|
def get_actions(self, observations):
"""Get multiple actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Actions
dict: Predicted action and agent information.
Note:
It returns an action and a dict, with keys
- mean (numpy.ndarray): Means of the distribution.
- log_std (numpy.ndarray): Log standard deviations of the
distribution.
- prev_action (numpy.ndarray): Previous action, only present if
self._state_include_action is True.
"""
if not isinstance(observations[0],
np.ndarray) or len(observations[0].shape) > 1:
observations = self.observation_space.flatten_n(observations)
if self._state_include_action:
assert self._prev_actions is not None
all_input = np.concatenate([observations, self._prev_actions],
axis=-1)
else:
all_input = observations
means, log_stds, hidden_vec = self._f_step_mean_std(
all_input, self._prev_hiddens)
rnd = np.random.normal(size=means.shape)
samples = rnd * np.exp(log_stds) + means
samples = self.action_space.unflatten_n(samples)
prev_actions = self._prev_actions
self._prev_actions = samples
self._prev_hiddens = hidden_vec
agent_infos = dict(mean=means, log_std=log_stds)
if self._state_include_action:
agent_infos['prev_action'] = np.copy(prev_actions)
return samples, agent_infos
|
Get multiple actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Actions
dict: Predicted action and agent information.
Note:
It returns an action and a dict, with keys
- mean (numpy.ndarray): Means of the distribution.
- log_std (numpy.ndarray): Log standard deviations of the
distribution.
- prev_action (numpy.ndarray): Previous action, only present if
self._state_include_action is True.
|
get_actions
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_gru_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_gru_policy.py
|
MIT
|
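The Gaussian policies above sample actions with the reparameterization `sample = noise * exp(log_std) + mean`. A NumPy-only version of that step, using arbitrary means and log standard deviations in place of the policy's outputs:

```python
import numpy as np

rng = np.random.default_rng(0)

# Per-environment means and log standard deviations (made-up values).
means = np.array([[0.0, 1.0],
                  [2.0, -1.0]])
log_stds = np.array([[0.0, -0.5],
                     [0.1, 0.3]])

# sample = noise * std + mean, exactly as in get_actions above.
rnd = rng.standard_normal(size=means.shape)
samples = rnd * np.exp(log_stds) + means

agent_infos = dict(mean=means, log_std=log_stds)
print(samples.shape)   # (2, 2)
```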
def state_info_specs(self):
"""State info specifcation.
Returns:
List[str]: keys and shapes for the information related to the
policy's state when taking an action.
"""
if self._state_include_action:
return [
('prev_action', (self._action_dim, )),
]
return []
|
State info specification.
Returns:
List[str]: keys and shapes for the information related to the
policy's state when taking an action.
|
state_info_specs
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_gru_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_gru_policy.py
|
MIT
|
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_step_mean_std']
del new_dict['_init_hidden']
return new_dict
|
Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_gru_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_gru_policy.py
|
MIT
|
def build(self, state_input, name=None):
"""Build policy.
Args:
state_input (tf.Tensor): State input.
name (str): Name of the policy, which is also the name scope.
Returns:
tfp.distributions.MultivariateNormalDiag: Policy distribution.
tf.Tensor: Step means, with shape :math:`(N, S^*)`.
tf.Tensor: Step log std, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`.
tf.Tensor: Step cell state, with shape :math:`(N, S^*)`.
tf.Tensor: Initial hidden state, with shape :math:`(S^*)`.
tf.Tensor: Initial cell state, with shape :math:`(S^*)`
"""
_, step_input, step_hidden, step_cell = self.inputs
return super().build(state_input,
step_input,
step_hidden,
step_cell,
name=name)
|
Build policy.
Args:
state_input (tf.Tensor): State input.
name (str): Name of the policy, which is also the name scope.
Returns:
tfp.distributions.MultivariateNormalDiag: Policy distribution.
tf.Tensor: Step means, with shape :math:`(N, S^*)`.
tf.Tensor: Step log std, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`.
tf.Tensor: Step cell state, with shape :math:`(N, S^*)`.
tf.Tensor: Initial hidden state, with shape :math:`(S^*)`.
tf.Tensor: Initial cell state, with shape :math:`(S^*)`
|
build
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_lstm_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_lstm_policy.py
|
MIT
|
def reset(self, do_resets=None):
"""Reset the policy.
Note:
If `do_resets` is None, it will be by default np.array([True]),
which implies the policy will not be "vectorized", i.e. number of
parallel environments for training data sampling = 1.
Args:
do_resets (numpy.ndarray): Bool that indicates terminal state(s).
"""
if do_resets is None:
do_resets = np.array([True])
if self._prev_actions is None or len(do_resets) != len(
self._prev_actions):
self._prev_actions = np.zeros(
(len(do_resets), self.action_space.flat_dim))
self._prev_hiddens = np.zeros((len(do_resets), self._hidden_dim))
self._prev_cells = np.zeros((len(do_resets), self._hidden_dim))
self._prev_actions[do_resets] = 0.
self._prev_hiddens[do_resets] = self._init_hidden.eval()
self._prev_cells[do_resets] = self._init_cell.eval()
|
Reset the policy.
Note:
If `do_resets` is None, it will be by default np.array([True]),
which implies the policy will not be "vectorized", i.e. number of
parallel environments for training data sampling = 1.
Args:
do_resets (numpy.ndarray): Bool that indicates terminal state(s).
|
reset
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_lstm_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_lstm_policy.py
|
MIT
|
def get_action(self, observation):
"""Get single action from this policy for the input observation.
Args:
observation (numpy.ndarray): Observation from environment.
Returns:
numpy.ndarray: Actions
dict: Predicted action and agent information.
Note:
It returns an action and a dict, with keys
- mean (numpy.ndarray): Mean of the distribution.
- log_std (numpy.ndarray): Log standard deviation of the
distribution.
- prev_action (numpy.ndarray): Previous action, only present if
self._state_include_action is True.
"""
actions, agent_infos = self.get_actions([observation])
return actions[0], {k: v[0] for k, v in agent_infos.items()}
|
Get single action from this policy for the input observation.
Args:
observation (numpy.ndarray): Observation from environment.
Returns:
numpy.ndarray: Actions
dict: Predicted action and agent information.
Note:
It returns an action and a dict, with keys
- mean (numpy.ndarray): Mean of the distribution.
- log_std (numpy.ndarray): Log standard deviation of the
distribution.
- prev_action (numpy.ndarray): Previous action, only present if
self._state_include_action is True.
|
get_action
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_lstm_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_lstm_policy.py
|
MIT
|
def get_actions(self, observations):
"""Get multiple actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Actions
dict: Predicted action and agent information.
Note:
It returns an action and a dict, with keys
- mean (numpy.ndarray): Means of the distribution.
- log_std (numpy.ndarray): Log standard deviations of the
distribution.
- prev_action (numpy.ndarray): Previous action, only present if
self._state_include_action is True.
"""
if not isinstance(observations[0],
np.ndarray) or len(observations[0].shape) > 1:
observations = self.observation_space.flatten_n(observations)
if self._state_include_action:
assert self._prev_actions is not None
all_input = np.concatenate([observations, self._prev_actions],
axis=-1)
else:
all_input = observations
means, log_stds, hidden_vec, cell_vec = self._f_step_mean_std(
all_input, self._prev_hiddens, self._prev_cells)
rnd = np.random.normal(size=means.shape)
samples = rnd * np.exp(log_stds) + means
samples = self.action_space.unflatten_n(samples)
prev_actions = self._prev_actions
self._prev_actions = samples
self._prev_hiddens = hidden_vec
self._prev_cells = cell_vec
agent_infos = dict(mean=means, log_std=log_stds)
if self._state_include_action:
agent_infos['prev_action'] = np.copy(prev_actions)
return samples, agent_infos
|
Get multiple actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Actions
dict: Predicted action and agent information.
Note:
It returns an action and a dict, with keys
- mean (numpy.ndarray): Means of the distribution.
- log_std (numpy.ndarray): Log standard deviations of the
distribution.
- prev_action (numpy.ndarray): Previous action, only present if
self._state_include_action is True.
|
get_actions
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_lstm_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_lstm_policy.py
|
MIT
|
def state_info_specs(self):
"""State info specifcation.
Returns:
List[str]: keys and shapes for the information related to the
policy's state when taking an action.
"""
if self._state_include_action:
return [
('prev_action', (self._action_dim, )),
]
return []
|
State info specification.
Returns:
List[str]: keys and shapes for the information related to the
policy's state when taking an action.
|
state_info_specs
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_lstm_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_lstm_policy.py
|
MIT
|
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_step_mean_std']
del new_dict['_init_hidden']
del new_dict['_init_cell']
return new_dict
|
Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_lstm_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_lstm_policy.py
|
MIT
|
def get_action(self, observation):
"""Get single action from this policy for the input observation.
Args:
observation (numpy.ndarray): Observation from environment.
Returns:
numpy.ndarray: Actions
dict: Predicted action and agent information.
Note:
It returns an action and a dict, with keys
- mean (numpy.ndarray): Mean of the distribution.
- log_std (numpy.ndarray): Log standard deviation of the
distribution.
"""
actions, agent_infos = self.get_actions([observation])
return actions[0], {k: v[0] for k, v in agent_infos.items()}
|
Get single action from this policy for the input observation.
Args:
observation (numpy.ndarray): Observation from environment.
Returns:
numpy.ndarray: Actions
dict: Predicted action and agent information.
Note:
It returns an action and a dict, with keys
- mean (numpy.ndarray): Mean of the distribution.
- log_std (numpy.ndarray): Log standard deviation of the
distribution.
|
get_action
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_mlp_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_mlp_policy.py
|
MIT
|
def get_actions(self, observations):
"""Get multiple actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Actions
dict: Predicted action and agent information.
Note:
It returns actions and a dict, with keys
- mean (numpy.ndarray): Means of the distribution.
- log_std (numpy.ndarray): Log standard deviations of the
distribution.
"""
if not isinstance(observations[0],
np.ndarray) or len(observations[0].shape) > 1:
observations = self.observation_space.flatten_n(observations)
samples, means, log_stds = self._f_dist(np.expand_dims(
observations, 1))
samples = self.action_space.unflatten_n(np.squeeze(samples, 1))
means = self.action_space.unflatten_n(np.squeeze(means, 1))
log_stds = self.action_space.unflatten_n(np.squeeze(log_stds, 1))
return samples, dict(mean=means, log_std=log_stds)
|
Get multiple actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Actions
dict: Predicted action and agent information.
Note:
It returns actions and a dict, with keys
- mean (numpy.ndarray): Means of the distribution.
- log_std (numpy.ndarray): Log standard deviations of the
distribution.
|
get_actions
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_mlp_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_mlp_policy.py
|
MIT
|
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_dist']
return new_dict
|
Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_mlp_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_mlp_policy.py
|
MIT
|
def _initialize(self):
"""Build policy to support sampling.
After build, get_action_*() methods will be available.
"""
obs_input = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, self.obs_dim))
encoder_input = tf.compat.v1.placeholder(
tf.float32, shape=(None, None, self._encoder.input_dim))
latent_input = tf.compat.v1.placeholder(
tf.float32, shape=(None, None, self._encoder.output_dim))
with tf.compat.v1.variable_scope(self._encoder.name):
encoder_dist = self._encoder.build(encoder_input,
name='encoder').dist
with tf.compat.v1.variable_scope('concat_obs_latent'):
obs_latent_input = tf.concat([obs_input, latent_input], -1)
dist, mean_var, log_std_var = super().build(
obs_latent_input,
# Must be named 'default' to
# match the tf default worker
name='default').outputs
embed_state_input = tf.concat([
obs_input,
encoder_dist.sample(seed=deterministic.get_tf_seed_stream())
], -1)
dist_given_task, mean_g_t, log_std_g_t = super().build(
embed_state_input, name='given_task').outputs
self._f_dist_obs_latent = tf.compat.v1.get_default_session(
).make_callable([
dist.sample(seed=deterministic.get_tf_seed_stream()), mean_var,
log_std_var
],
feed_list=[obs_input, latent_input])
self._f_dist_obs_task = tf.compat.v1.get_default_session(
).make_callable([
dist_given_task.sample(seed=deterministic.get_tf_seed_stream()),
mean_g_t, log_std_g_t
],
feed_list=[obs_input, encoder_input])
|
Build policy to support sampling.
After build, get_action_*() methods will be available.
|
_initialize
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_mlp_task_embedding_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_mlp_task_embedding_policy.py
|
MIT
|
def build(self, obs_input, task_input, name=None):
"""Build policy.
Args:
obs_input (tf.Tensor): Observation input.
task_input (tf.Tensor): One-hot task id input.
name (str): Name of the model, which is also the name scope.
Returns:
namedtuple: Policy network.
namedtuple: Encoder network.
"""
name = name or 'additional'
# Encoder should be outside policy scope
with tf.compat.v1.variable_scope(self._encoder.name):
enc_net = self._encoder.build(task_input, name=name)
latent_var = enc_net.dist.loc
embed_state_input = tf.concat([obs_input, latent_var], -1)
return super().build(embed_state_input, name=name), enc_net
|
Build policy.
Args:
obs_input (tf.Tensor): Observation input.
task_input (tf.Tensor): One-hot task id input.
name (str): Name of the model, which is also the name scope.
Returns:
namedtuple: Policy network.
namedtuple: Encoder network.
|
build
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_mlp_task_embedding_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_mlp_task_embedding_policy.py
|
MIT
|
def get_action(self, observation):
"""Get action sampled from the policy.
Args:
observation (np.ndarray): Augmented observation from the
environment, with shape :math:`(O+N, )`. O is the dimension
of observation, N is the number of tasks.
Returns:
np.ndarray: Action sampled from the policy,
with shape :math:`(A, )`. A is the dimension of action.
dict: Action distribution information, with keys:
- mean (numpy.ndarray): Mean of the distribution,
with shape :math:`(A, )`. A is the dimension of
action.
- log_std (numpy.ndarray): Log standard deviation of the
distribution, with shape :math:`(A, )`.
A is the dimension of action.
"""
actions, agent_infos = self.get_actions([observation])
return actions[0], {k: v[0] for k, v in agent_infos.items()}
|
Get action sampled from the policy.
Args:
observation (np.ndarray): Augmented observation from the
environment, with shape :math:`(O+N, )`. O is the dimension
of observation, N is the number of tasks.
Returns:
np.ndarray: Action sampled from the policy,
with shape :math:`(A, )`. A is the dimension of action.
dict: Action distribution information, with keys:
- mean (numpy.ndarray): Mean of the distribution,
with shape :math:`(A, )`. A is the dimension of
action.
- log_std (numpy.ndarray): Log standard deviation of the
distribution, with shape :math:`(A, )`.
A is the dimension of action.
|
get_action
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_mlp_task_embedding_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_mlp_task_embedding_policy.py
|
MIT
|
def get_actions(self, observations):
"""Get actions sampled from the policy.
Args:
observations (np.ndarray): Augmented observation from the
environment, with shape :math:`(T, O+N)`. T is the number of
environment steps, O is the dimension of observation, N is the
number of tasks.
Returns:
np.ndarray: Actions sampled from the policy,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
dict: Action distribution information, with keys:
- mean (numpy.ndarray): Mean of the distribution,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
- log_std (numpy.ndarray): Log standard deviation of the
distribution, with shape :math:`(T, A)`. T is the number of
environment steps, A is the dimension of action.
"""
obses, tasks = zip(*[
self.split_augmented_observation(aug_obs)
for aug_obs in observations
])
return self.get_actions_given_tasks(np.array(obses), np.array(tasks))
|
Get actions sampled from the policy.
Args:
observations (np.ndarray): Augmented observation from the
environment, with shape :math:`(T, O+N)`. T is the number of
environment steps, O is the dimension of observation, N is the
number of tasks.
Returns:
np.ndarray: Actions sampled from the policy,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
dict: Action distribution information, with keys:
- mean (numpy.ndarray): Mean of the distribution,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
- log_std (numpy.ndarray): Log standard deviation of the
distribution, with shape :math:`(T, A)`. T is the number of
environment steps, A is the dimension of action.
|
get_actions
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_mlp_task_embedding_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_mlp_task_embedding_policy.py
|
MIT
|
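`get_actions` above relies on `split_augmented_observation` to separate each `(O+N,)` vector back into the raw observation and the one-hot task id. A NumPy illustration of that split with assumed sizes O=4 and N=3; the helper below is a hypothetical stand-in that follows the layout described in the docstring (observation first, one-hot task id last), not the garage method itself.

```python
import numpy as np

obs_dim, n_tasks = 4, 3    # O and N in the docstring above

def split_augmented_observation(aug_obs):
    # First O entries are the observation, the last N are the one-hot task id.
    return aug_obs[:obs_dim], aug_obs[obs_dim:]

# Two augmented observations: random obs followed by task 1 and task 2 one-hots.
aug = np.concatenate([np.random.rand(2, obs_dim),
                      np.eye(n_tasks)[[1, 2]]], axis=1)

obses, tasks = zip(*[split_augmented_observation(a) for a in aug])
obses, tasks = np.array(obses), np.array(tasks)
print(obses.shape, tasks.shape)   # (2, 4) (2, 3)
```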
def get_action_given_latent(self, observation, latent):
"""Sample an action given observation and latent.
Args:
observation (np.ndarray): Observation from the environment,
with shape :math:`(O, )`. O is the dimension of observation.
latent (np.ndarray): Latent, with shape :math:`(Z, )`. Z is the
dimension of the latent embedding.
Returns:
np.ndarray: Action sampled from the policy,
with shape :math:`(A, )`. A is the dimension of action.
dict: Action distribution information, with keys:
- mean (numpy.ndarray): Mean of the distribution,
with shape :math:`(A, )`. A is the dimension of action.
- log_std (numpy.ndarray): Log standard deviation of the
distribution, with shape :math:`(A, )`. A is the dimension
of action.
"""
flat_obs = self.observation_space.flatten(observation)
flat_obs = np.expand_dims([flat_obs], 1)
flat_latent = self.latent_space.flatten(latent)
flat_latent = np.expand_dims([flat_latent], 1)
sample, mean, log_std = self._f_dist_obs_latent(flat_obs, flat_latent)
sample = self.action_space.unflatten(np.squeeze(sample, 1)[0])
mean = self.action_space.unflatten(np.squeeze(mean, 1)[0])
log_std = self.action_space.unflatten(np.squeeze(log_std, 1)[0])
return sample, dict(mean=mean, log_std=log_std)
|
Sample an action given observation and latent.
Args:
observation (np.ndarray): Observation from the environment,
with shape :math:`(O, )`. O is the dimension of observation.
latent (np.ndarray): Latent, with shape :math:`(Z, )`. Z is the
dimension of the latent embedding.
Returns:
np.ndarray: Action sampled from the policy,
with shape :math:`(A, )`. A is the dimension of action.
dict: Action distribution information, with keys:
- mean (numpy.ndarray): Mean of the distribution,
with shape :math:`(A, )`. A is the dimension of action.
- log_std (numpy.ndarray): Log standard deviation of the
distribution, with shape :math:`(A, )`. A is the dimension
of action.
|
get_action_given_latent
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_mlp_task_embedding_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_mlp_task_embedding_policy.py
|
MIT
|
def get_actions_given_latents(self, observations, latents):
"""Sample a batch of actions given observations and latents.
Args:
observations (np.ndarray): Observations from the environment, with
shape :math:`(T, O)`. T is the number of environment steps, O
is the dimension of observation.
latents (np.ndarray): Latents, with shape :math:`(T, Z)`. T is the
number of environment steps, Z is the dimension of
latent embedding.
Returns:
np.ndarray: Actions sampled from the policy,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
dict: Action distribution information, with keys:
- mean (numpy.ndarray): Mean of the distribution,
with shape :math:`(T, A)`. T is the number of
environment steps. A is the dimension of action.
- log_std (numpy.ndarray): Log standard deviation of the
distribution, with shape :math:`(T, A)`. T is the number of
environment steps. A is the dimension of action.
"""
flat_obses = self.observation_space.flatten_n(observations)
flat_obses = np.expand_dims(flat_obses, 1)
flat_latents = self.latent_space.flatten_n(latents)
flat_latents = np.expand_dims(flat_latents, 1)
samples, means, log_stds = self._f_dist_obs_latent(
flat_obses, flat_latents)
samples = self.action_space.unflatten_n(np.squeeze(samples, 1))
means = self.action_space.unflatten_n(np.squeeze(means, 1))
log_stds = self.action_space.unflatten_n(np.squeeze(log_stds, 1))
return samples, dict(mean=means, log_std=log_stds)
|
Sample a batch of actions given observations and latents.
Args:
observations (np.ndarray): Observations from the environment, with
shape :math:`(T, O)`. T is the number of environment steps, O
is the dimension of observation.
latents (np.ndarray): Latents, with shape :math:`(T, Z)`. T is the
number of environment steps, Z is the dimension of
latent embedding.
Returns:
np.ndarray: Actions sampled from the policy,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
dict: Action distribution information, with keys:
- mean (numpy.ndarray): Mean of the distribution,
with shape :math:`(T, A)`. T is the number of
environment steps. A is the dimension of action.
- log_std (numpy.ndarray): Log standard deviation of the
distribution, with shape :math:`(T, A)`. T is the number of
environment steps. A is the dimension of action.
|
get_actions_given_latents
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_mlp_task_embedding_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_mlp_task_embedding_policy.py
|
MIT
|
def get_action_given_task(self, observation, task_id):
"""Sample an action given observation and task id.
Args:
observation (np.ndarray): Observation from the environment, with
shape :math:`(O, )`. O is the dimension of the observation.
task_id (np.ndarray): One-hot task id, with shape :math:`(N, )`.
N is the number of tasks.
Returns:
np.ndarray: Action sampled from the policy, with shape
:math:`(A, )`. A is the dimension of action.
dict: Action distribution information, with keys:
- mean (numpy.ndarray): Mean of the distribution,
with shape :math:`(A, )`. A is the dimension of action.
- log_std (numpy.ndarray): Log standard deviation of the
distribution, with shape :math:`(A, )`. A is the dimension
of action.
"""
flat_obs = self.observation_space.flatten(observation)
flat_obs = np.expand_dims([flat_obs], 1)
task_id = np.expand_dims([task_id], 1)
sample, mean, log_std = self._f_dist_obs_task(flat_obs, task_id)
sample = self.action_space.unflatten(np.squeeze(sample, 1)[0])
mean = self.action_space.unflatten(np.squeeze(mean, 1)[0])
log_std = self.action_space.unflatten(np.squeeze(log_std, 1)[0])
return sample, dict(mean=mean, log_std=log_std)
|
Sample an action given observation and task id.
Args:
observation (np.ndarray): Observation from the environment, with
shape :math:`(O, )`. O is the dimension of the observation.
task_id (np.ndarray): One-hot task id, with shape :math:`(N, )`.
N is the number of tasks.
Returns:
np.ndarray: Action sampled from the policy, with shape
:math:`(A, )`. A is the dimension of action.
dict: Action distribution information, with keys:
- mean (numpy.ndarray): Mean of the distribution,
with shape :math:`(A, )`. A is the dimension of action.
- log_std (numpy.ndarray): Log standard deviation of the
distribution, with shape :math:`(A, )`. A is the dimension
of action.
|
get_action_given_task
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_mlp_task_embedding_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_mlp_task_embedding_policy.py
|
MIT
|
def get_actions_given_tasks(self, observations, task_ids):
"""Sample a batch of actions given observations and task ids.
Args:
observations (np.ndarray): Observations from the environment, with
shape :math:`(T, O)`. T is the number of environment steps,
O is the dimension of observation.
task_ids (np.ndarray): One-hot task ids, with shape :math:`(T, N)`.
T is the number of environment steps, N is the number of tasks.
Returns:
np.ndarray: Actions sampled from the policy,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
dict: Action distribution information, with keys:
- mean (numpy.ndarray): Mean of the distribution,
with shape :math:`(T, A)`. T is the number of
environment steps. A is the dimension of action.
- log_std (numpy.ndarray): Log standard deviation of the
distribution, with shape :math:`(T, A)`. T is the number of
environment steps. A is the dimension of action.
"""
flat_obses = self.observation_space.flatten_n(observations)
flat_obses = np.expand_dims(flat_obses, 1)
task_ids = np.expand_dims(task_ids, 1)
samples, means, log_stds = self._f_dist_obs_task(flat_obses, task_ids)
samples = self.action_space.unflatten_n(np.squeeze(samples, 1))
means = self.action_space.unflatten_n(np.squeeze(means, 1))
log_stds = self.action_space.unflatten_n(np.squeeze(log_stds, 1))
return samples, dict(mean=means, log_std=log_stds)
|
Sample a batch of actions given observations and task ids.
Args:
observations (np.ndarray): Observations from the environment, with
shape :math:`(T, O)`. T is the number of environment steps,
O is the dimension of observation.
task_ids (np.ndarray): One-hot task ids, with shape :math:`(T, N)`.
T is the number of environment steps, N is the number of tasks.
Returns:
np.ndarray: Actions sampled from the policy,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
dict: Action distribution information, with keys:
- mean (numpy.ndarray): Mean of the distribution,
with shape :math:`(T, A)`. T is the number of
environment steps. A is the dimension of action.
- log_std (numpy.ndarray): Log standard deviation of the
distribution, with shape :math:`(T, A)`. T is the number of
environment steps. A is the dimension of action.
|
get_actions_given_tasks
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_mlp_task_embedding_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_mlp_task_embedding_policy.py
|
MIT
|
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_dist_obs_latent']
del new_dict['_f_dist_obs_task']
return new_dict
|
Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/gaussian_mlp_task_embedding_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/gaussian_mlp_task_embedding_policy.py
|
MIT
|
def get_action(self, observation):
"""Get action sampled from the policy.
Args:
observation (np.ndarray): Observation from the environment.
Returns:
Tuple[np.ndarray, dict[str,np.ndarray]]: Action and extra agent
info.
"""
|
Get action sampled from the policy.
Args:
observation (np.ndarray): Observation from the environment.
Returns:
Tuple[np.ndarray, dict[str,np.ndarray]]: Action and extra agent
info.
|
get_action
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/policy.py
|
MIT
|
def get_actions(self, observations):
"""Get actions given observations.
Args:
observations (np.ndarray): Observations from the environment.
Returns:
Tuple[np.ndarray, dict[str,np.ndarray]]: Actions and extra agent
infos.
"""
|
Get actions given observations.
Args:
observations (np.ndarray): Observations from the environment.
Returns:
Tuple[np.ndarray, dict[str,np.ndarray]]: Actions and extra agent
infos.
|
get_actions
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/policy.py
|
MIT
|
def get_action(self, observation):
"""Get action sampled from the policy.
Args:
observation (np.ndarray): Augmented observation from the
environment, with shape :math:`(O+N, )`. O is the dimension of
observation, N is the number of tasks.
Returns:
np.ndarray: Action sampled from the policy,
with shape :math:`(A, )`. A is the dimension of action.
dict: Action distribution information.
"""
|
Get action sampled from the policy.
Args:
observation (np.ndarray): Augmented observation from the
environment, with shape :math:`(O+N, )`. O is the dimension of
observation, N is the number of tasks.
Returns:
np.ndarray: Action sampled from the policy,
with shape :math:`(A, )`. A is the dimension of action.
dict: Action distribution information.
|
get_action
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/task_embedding_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/task_embedding_policy.py
|
MIT
|
def get_actions(self, observations):
"""Get actions sampled from the policy.
Args:
observations (np.ndarray): Augmented observation from the
environment, with shape :math:`(T, O+N)`. T is the number of
environment steps, O is the dimension of observation, N is the
number of tasks.
Returns:
np.ndarray: Actions sampled from the policy,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
dict: Action distribution information.
"""
|
Get actions sampled from the policy.
Args:
observations (np.ndarray): Augmented observation from the
environment, with shape :math:`(T, O+N)`. T is the number of
environment steps, O is the dimension of observation, N is the
number of tasks.
Returns:
np.ndarray: Actions sampled from the policy,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
dict: Action distribution information.
|
get_actions
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/task_embedding_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/task_embedding_policy.py
|
MIT
|
def get_action_given_task(self, observation, task_id):
"""Sample an action given observation and task id.
Args:
observation (np.ndarray): Observation from the environment, with
shape :math:`(O, )`. O is the dimension of the observation.
        task_id (np.ndarray): One-hot task id, with shape :math:`(N, )`.
N is the number of tasks.
Returns:
np.ndarray: Action sampled from the policy, with shape
:math:`(A, )`. A is the dimension of action.
dict: Action distribution information.
"""
|
Sample an action given observation and task id.
Args:
observation (np.ndarray): Observation from the environment, with
shape :math:`(O, )`. O is the dimension of the observation.
    task_id (np.ndarray): One-hot task id, with shape :math:`(N, )`.
N is the number of tasks.
Returns:
np.ndarray: Action sampled from the policy, with shape
:math:`(A, )`. A is the dimension of action.
dict: Action distribution information.
|
get_action_given_task
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/task_embedding_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/task_embedding_policy.py
|
MIT
|
def get_actions_given_tasks(self, observations, task_ids):
"""Sample a batch of actions given observations and task ids.
Args:
observations (np.ndarray): Observations from the environment, with
shape :math:`(T, O)`. T is the number of environment steps,
O is the dimension of observation.
        task_ids (np.ndarray): One-hot task ids, with shape :math:`(T, N)`.
T is the number of environment steps, N is the number of tasks.
Returns:
np.ndarray: Actions sampled from the policy,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
dict: Action distribution information.
"""
|
Sample a batch of actions given observations and task ids.
Args:
observations (np.ndarray): Observations from the environment, with
shape :math:`(T, O)`. T is the number of environment steps,
O is the dimension of observation.
    task_ids (np.ndarray): One-hot task ids, with shape :math:`(T, N)`.
T is the number of environment steps, N is the number of tasks.
Returns:
np.ndarray: Actions sampled from the policy,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
dict: Action distribution information.
|
get_actions_given_tasks
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/task_embedding_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/task_embedding_policy.py
|
MIT
|
def get_action_given_latent(self, observation, latent):
"""Sample an action given observation and latent.
Args:
observation (np.ndarray): Observation from the environment,
with shape :math:`(O, )`. O is the dimension of observation.
latent (np.ndarray): Latent, with shape :math:`(Z, )`. Z is the
dimension of latent embedding.
Returns:
np.ndarray: Action sampled from the policy,
with shape :math:`(A, )`. A is the dimension of action.
dict: Action distribution information.
"""
|
Sample an action given observation and latent.
Args:
observation (np.ndarray): Observation from the environment,
with shape :math:`(O, )`. O is the dimension of observation.
latent (np.ndarray): Latent, with shape :math:`(Z, )`. Z is the
dimension of latent embedding.
Returns:
np.ndarray: Action sampled from the policy,
with shape :math:`(A, )`. A is the dimension of action.
dict: Action distribution information.
|
get_action_given_latent
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/task_embedding_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/task_embedding_policy.py
|
MIT
|
def get_actions_given_latents(self, observations, latents):
"""Sample a batch of actions given observations and latents.
Args:
observations (np.ndarray): Observations from the environment, with
shape :math:`(T, O)`. T is the number of environment steps, O
is the dimension of observation.
latents (np.ndarray): Latents, with shape :math:`(T, Z)`. T is the
number of environment steps, Z is the dimension of
latent embedding.
Returns:
np.ndarray: Actions sampled from the policy,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
dict: Action distribution information.
"""
|
Sample a batch of actions given observations and latents.
Args:
observations (np.ndarray): Observations from the environment, with
shape :math:`(T, O)`. T is the number of environment steps, O
is the dimension of observation.
latents (np.ndarray): Latents, with shape :math:`(T, Z)`. T is the
number of environment steps, Z is the dimension of
latent embedding.
Returns:
np.ndarray: Actions sampled from the policy,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
dict: Action distribution information.
|
get_actions_given_latents
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/task_embedding_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/task_embedding_policy.py
|
MIT
|
def split_augmented_observation(self, collated):
"""Splits up observation into one-hot task and environment observation.
Args:
collated (np.ndarray): Environment observation concatenated with
task one-hot, with shape :math:`(O+N, )`. O is the dimension of
observation, N is the number of tasks.
Returns:
np.ndarray: Vanilla environment observation,
with shape :math:`(O, )`. O is the dimension of observation.
np.ndarray: Task one-hot, with shape :math:`(N, )`. N is the number
of tasks.
"""
task_dim = self.task_space.flat_dim
return collated[:-task_dim], collated[-task_dim:]
|
Splits up observation into one-hot task and environment observation.
Args:
collated (np.ndarray): Environment observation concatenated with
task one-hot, with shape :math:`(O+N, )`. O is the dimension of
observation, N is the number of tasks.
Returns:
np.ndarray: Vanilla environment observation,
with shape :math:`(O, )`. O is the dimension of observation.
np.ndarray: Task one-hot, with shape :math:`(N, )`. N is the number
of tasks.
|
split_augmented_observation
|
python
|
rlworkgroup/garage
|
src/garage/tf/policies/task_embedding_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/policies/task_embedding_policy.py
|
MIT
|
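A minimal sketch of the split performed above, assuming `task_space.flat_dim == 3`; the concrete observation values are made up.

import numpy as np

collated = np.array([0.1, 0.2, 0.3, 0.4, 0.0, 1.0, 0.0])  # obs (O=4) followed by one-hot (N=3)
task_dim = 3                                               # stands in for self.task_space.flat_dim
obs, task_onehot = collated[:-task_dim], collated[-task_dim:]
# obs == [0.1, 0.2, 0.3, 0.4], task_onehot == [0., 1., 0.]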
def get_qval(self, observation, action):
"""Q Value of the network.
Args:
observation (np.ndarray): Observation input of shape
:math:`(N, O*)`.
action (np.ndarray): Action input of shape :math:`(N, A*)`.
Returns:
np.ndarray: Array of shape :math:`(N, )` containing Q values
corresponding to each (obs, act) pair.
"""
if len(observation[0].shape) < len(self._obs_dim):
observation = self._env_spec.observation_space.unflatten_n(
observation)
return self._f_qval(observation, action)
|
Q Value of the network.
Args:
observation (np.ndarray): Observation input of shape
:math:`(N, O*)`.
action (np.ndarray): Action input of shape :math:`(N, A*)`.
Returns:
np.ndarray: Array of shape :math:`(N, )` containing Q values
corresponding to each (obs, act) pair.
|
get_qval
|
python
|
rlworkgroup/garage
|
src/garage/tf/q_functions/continuous_cnn_q_function.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/q_functions/continuous_cnn_q_function.py
|
MIT
|
def build(self, state_input, action_input, name):
"""Build the symbolic graph for q-network.
Args:
state_input (tf.Tensor): The state input tf.Tensor of shape
:math:`(N, O*)`.
action_input (tf.Tensor): The action input tf.Tensor of shape
:math:`(N, A*)`.
name (str): Network variable scope.
Return:
tf.Tensor: The output Q value tensor of shape :math:`(N, )`.
"""
augmented_state_input = state_input
if isinstance(self._env_spec.observation_space, akro.Image):
augmented_state_input = tf.cast(state_input, tf.float32) / 255.0
return super().build(augmented_state_input, action_input,
name=name).outputs
|
Build the symbolic graph for q-network.
Args:
state_input (tf.Tensor): The state input tf.Tensor of shape
:math:`(N, O*)`.
action_input (tf.Tensor): The action input tf.Tensor of shape
:math:`(N, A*)`.
name (str): Network variable scope.
Return:
tf.Tensor: The output Q value tensor of shape :math:`(N, )`.
|
build
|
python
|
rlworkgroup/garage
|
src/garage/tf/q_functions/continuous_cnn_q_function.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/q_functions/continuous_cnn_q_function.py
|
MIT
|
def build(self, state_input, name):
"""Build the symbolic graph for q-network.
Args:
state_input (tf.Tensor): The state input tf.Tensor to the network.
name (str): Network variable scope.
Return:
tf.Tensor: The tf.Tensor output of Discrete CNN QFunction.
"""
augmented_state_input = state_input
if isinstance(self._env_spec.observation_space, akro.Image):
augmented_state_input = tf.cast(state_input, tf.float32) / 255.0
return super().build(augmented_state_input, name=name).outputs
|
Build the symbolic graph for q-network.
Args:
state_input (tf.Tensor): The state input tf.Tensor to the network.
name (str): Network variable scope.
Return:
tf.Tensor: The tf.Tensor output of Discrete CNN QFunction.
|
build
|
python
|
rlworkgroup/garage
|
src/garage/tf/q_functions/discrete_cnn_q_function.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/q_functions/discrete_cnn_q_function.py
|
MIT
|
def __call__(self, *args, **kwargs):
"""Construct the inner class and wrap it.
Args:
*args: Passed on to inner worker class.
**kwargs: Passed on to inner worker class.
Returns:
TFWorkerWrapper: The wrapped worker.
"""
wrapper = TFWorkerWrapper()
# Need to construct the wrapped class after we've entered the Session.
wrapper._inner_worker = self._wrapped_class(*args, **kwargs)
return wrapper
|
Construct the inner class and wrap it.
Args:
*args: Passed on to inner worker class.
**kwargs: Passed on to inner worker class.
Returns:
TFWorkerWrapper: The wrapped worker.
|
__call__
|
python
|
rlworkgroup/garage
|
src/garage/tf/samplers/worker.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/tf/samplers/worker.py
|
MIT
|
def zero_optim_grads(optim, set_to_none=True):
"""Sets the gradient of all optimized tensors to None.
    This is an optimization alternative to calling `optimizer.zero_grad()`.
    Args:
        optim (torch.optim.Optimizer): The optimizer instance
            whose parameter gradients will be zeroed.
        set_to_none (bool): Set gradients to None
            instead of calling `zero_grad()`, which
            sets them to 0.
"""
if not set_to_none:
optim.zero_grad()
return
for group in optim.param_groups:
for param in group['params']:
param.grad = None
|
Sets the gradient of all optimized tensors to None.
This is an optimization alternative to calling `optimizer.zero_grad()`.
Args:
    optim (torch.optim.Optimizer): The optimizer instance
        whose parameter gradients will be zeroed.
    set_to_none (bool): Set gradients to None
        instead of calling `zero_grad()`, which
        sets them to 0.
|
zero_optim_grads
|
python
|
rlworkgroup/garage
|
src/garage/torch/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/_functions.py
|
MIT
|
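A minimal usage sketch, assuming the zero_optim_grads helper above is in scope; it only shows that the gradients end up as None rather than zero tensors.

import torch
from torch import nn

model = nn.Linear(4, 2)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
model(torch.randn(8, 4)).sum().backward()         # populate .grad on the parameters
zero_optim_grads(optim, set_to_none=True)         # clear them by setting .grad = None
assert all(p.grad is None for group in optim.param_groups for p in group['params'])

Recent PyTorch releases expose the same behaviour directly via optimizer.zero_grad(set_to_none=True).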
def compute_advantages(discount, gae_lambda, max_episode_length, baselines,
rewards):
"""Calculate advantages.
Advantages are a discounted cumulative sum.
Calculate advantages using a baseline according to Generalized Advantage
Estimation (GAE)
The discounted cumulative sum can be computed using conv2d with filter.
filter:
[1, (discount * gae_lambda), (discount * gae_lambda) ^ 2, ...]
        where the length is the same as max_episode_length.
    baselines and rewards also have the same shape.
baselines:
[ [b_11, b_12, b_13, ... b_1n],
[b_21, b_22, b_23, ... b_2n],
...
[b_m1, b_m2, b_m3, ... b_mn] ]
rewards:
[ [r_11, r_12, r_13, ... r_1n],
[r_21, r_22, r_23, ... r_2n],
...
[r_m1, r_m2, r_m3, ... r_mn] ]
Args:
discount (float): RL discount factor (i.e. gamma).
gae_lambda (float): Lambda, as used for Generalized Advantage
Estimation (GAE).
max_episode_length (int): Maximum length of a single episode.
baselines (torch.Tensor): A 2D vector of value function estimates with
shape (N, T), where N is the batch dimension (number of episodes)
and T is the maximum episode length experienced by the agent. If an
episode terminates in fewer than T time steps, the remaining
elements in that episode should be set to 0.
rewards (torch.Tensor): A 2D vector of per-step rewards with shape
(N, T), where N is the batch dimension (number of episodes) and T
is the maximum episode length experienced by the agent. If an
episode terminates in fewer than T time steps, the remaining
elements in that episode should be set to 0.
Returns:
torch.Tensor: A 2D vector of calculated advantage values with shape
(N, T), where N is the batch dimension (number of episodes) and T
is the maximum episode length experienced by the agent. If an
episode terminates in fewer than T time steps, the remaining values
in that episode should be set to 0.
"""
adv_filter = torch.full((1, 1, 1, max_episode_length - 1),
discount * gae_lambda,
dtype=torch.float)
adv_filter = torch.cumprod(F.pad(adv_filter, (1, 0), value=1), dim=-1)
deltas = (rewards + discount * F.pad(baselines, (0, 1))[:, 1:] - baselines)
deltas = F.pad(deltas,
(0, max_episode_length - 1)).unsqueeze(0).unsqueeze(0)
advantages = F.conv2d(deltas, adv_filter, stride=1).reshape(rewards.shape)
return advantages
|
Calculate advantages.
Advantages are a discounted cumulative sum.
Calculate advantages using a baseline according to Generalized Advantage
Estimation (GAE)
The discounted cumulative sum can be computed using conv2d with filter.
filter:
[1, (discount * gae_lambda), (discount * gae_lambda) ^ 2, ...]
    where the length is the same as max_episode_length.
baselines and rewards also have the same shape.
baselines:
[ [b_11, b_12, b_13, ... b_1n],
[b_21, b_22, b_23, ... b_2n],
...
[b_m1, b_m2, b_m3, ... b_mn] ]
rewards:
[ [r_11, r_12, r_13, ... r_1n],
[r_21, r_22, r_23, ... r_2n],
...
[r_m1, r_m2, r_m3, ... r_mn] ]
Args:
discount (float): RL discount factor (i.e. gamma).
gae_lambda (float): Lambda, as used for Generalized Advantage
Estimation (GAE).
max_episode_length (int): Maximum length of a single episode.
baselines (torch.Tensor): A 2D vector of value function estimates with
shape (N, T), where N is the batch dimension (number of episodes)
and T is the maximum episode length experienced by the agent. If an
episode terminates in fewer than T time steps, the remaining
elements in that episode should be set to 0.
rewards (torch.Tensor): A 2D vector of per-step rewards with shape
(N, T), where N is the batch dimension (number of episodes) and T
is the maximum episode length experienced by the agent. If an
episode terminates in fewer than T time steps, the remaining
elements in that episode should be set to 0.
Returns:
torch.Tensor: A 2D vector of calculated advantage values with shape
(N, T), where N is the batch dimension (number of episodes) and T
is the maximum episode length experienced by the agent. If an
episode terminates in fewer than T time steps, the remaining values
in that episode should be set to 0.
|
compute_advantages
|
python
|
rlworkgroup/garage
|
src/garage/torch/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/_functions.py
|
MIT
|
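A tiny worked example of the function above, assuming compute_advantages (and the torch/F imports of its module) are in scope; the reward and baseline values are made up.

import torch

discount, gae_lambda, max_len = 0.99, 0.95, 3
rewards = torch.tensor([[1.0, 1.0, 1.0]])       # one episode, T = 3
baselines = torch.tensor([[0.5, 0.5, 0.5]])
adv = compute_advantages(discount, gae_lambda, max_len, baselines, rewards)
# The TD deltas are [0.995, 0.995, 0.5]; the last advantage equals the last delta
# (0.5) because nothing is bootstrapped beyond T, and earlier entries accumulate
# later deltas discounted by (discount * gae_lambda).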
def pad_to_last(nums, total_length, axis=-1, val=0):
"""Pad val to last in nums in given axis.
length of the result in given axis should be total_length.
Raises:
IndexError: If the input axis value is out of range of the nums array
Args:
nums (numpy.ndarray): The array to pad.
        total_length (int): The final width of the array.
        axis (int): Axis along which to pad.
        val (int): The value to pad with.
Returns:
torch.Tensor: Padded array
"""
tensor = torch.Tensor(nums)
axis = (axis + len(tensor.shape)) if axis < 0 else axis
if len(tensor.shape) <= axis:
raise IndexError('axis {} is out of range {}'.format(
axis, tensor.shape))
padding_config = [0, 0] * len(tensor.shape)
padding_idx = abs(axis - len(tensor.shape)) * 2 - 1
padding_config[padding_idx] = max(total_length - tensor.shape[axis], val)
return F.pad(tensor, padding_config)
|
Pad nums with val at the end along the given axis.
The length of the result along the given axis should be total_length.
Raises:
IndexError: If the input axis value is out of range of the nums array
Args:
nums (numpy.ndarray): The array to pad.
    total_length (int): The final width of the array.
    axis (int): Axis along which to pad.
    val (int): The value to pad with.
Returns:
torch.Tensor: Padded array
|
pad_to_last
|
python
|
rlworkgroup/garage
|
src/garage/torch/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/_functions.py
|
MIT
|
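Two small usage examples of the helper above, assuming pad_to_last is in scope.

import torch

pad_to_last([1, 2, 3], total_length=5)             # tensor([1., 2., 3., 0., 0.])
m = torch.ones(2, 3)
pad_to_last(m, total_length=4, axis=0).shape       # torch.Size([4, 3]); the new rows are zeros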
def np_to_torch(array):
"""Numpy arrays to PyTorch tensors.
Args:
array (np.ndarray): Data in numpy array.
Returns:
torch.Tensor: float tensor on the global device.
"""
tensor = torch.from_numpy(array)
if tensor.dtype != torch.float32:
tensor = tensor.float()
return tensor.to(global_device())
|
Numpy arrays to PyTorch tensors.
Args:
array (np.ndarray): Data in numpy array.
Returns:
torch.Tensor: float tensor on the global device.
|
np_to_torch
|
python
|
rlworkgroup/garage
|
src/garage/torch/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/_functions.py
|
MIT
|
def as_torch_dict(array_dict):
"""Convert a dict whose values are numpy arrays to PyTorch tensors.
Modifies array_dict in place.
Args:
array_dict (dict): Dictionary of data in numpy arrays
Returns:
dict: Dictionary of data in PyTorch tensors
"""
for key, value in array_dict.items():
array_dict[key] = np_to_torch(value)
return array_dict
|
Convert a dict whose values are numpy arrays to PyTorch tensors.
Modifies array_dict in place.
Args:
array_dict (dict): Dictionary of data in numpy arrays
Returns:
dict: Dictionary of data in PyTorch tensors
|
as_torch_dict
|
python
|
rlworkgroup/garage
|
src/garage/torch/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/_functions.py
|
MIT
|
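A minimal sketch tying the two helpers above together; the dictionary keys and shapes are made up.

import numpy as np

batch = {'observations': np.zeros((8, 4)), 'rewards': np.ones(8)}
batch = as_torch_dict(batch)
# Both values are now float32 torch.Tensors, moved to the device returned by
# global_device() (set via set_gpu_mode or prefer_gpu).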
def flatten_to_single_vector(tensor):
"""Collapse the C x H x W values per representation into a single long vector.
Reshape a tensor of size (N, C, H, W) into (N, C * H * W).
Args:
        tensor (torch.Tensor): Batch of data.
Returns:
torch.Tensor: Reshaped view of that data (analogous to numpy.reshape)
"""
N = tensor.shape[0] # read in N, C, H, W
return tensor.view(N, -1)
|
Collapse the C x H x W values per representation into a single long vector.
Reshape a tensor of size (N, C, H, W) into (N, C * H * W).
Args:
    tensor (torch.Tensor): Batch of data.
Returns:
torch.Tensor: Reshaped view of that data (analogous to numpy.reshape)
|
flatten_to_single_vector
|
python
|
rlworkgroup/garage
|
src/garage/torch/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/_functions.py
|
MIT
|
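A one-line usage example of the reshape helper above.

import torch

imgs = torch.zeros(8, 3, 32, 32)             # (N, C, H, W)
flatten_to_single_vector(imgs).shape         # torch.Size([8, 3072])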
def update_module_params(module, new_params): # noqa: D202
"""Load parameters to a module.
This function acts like `torch.nn.Module._load_from_state_dict()`, but
it replaces the tensors in module with those in new_params, while
    `_load_from_state_dict()` loads only the values. Use this function so
    that the `grad` and `grad_fn` of `new_params` can be restored.
Args:
module (torch.nn.Module): A torch module.
        new_params (dict): A dict of torch tensors used as the new
            parameters of this module. This parameter dict should be
            generated by `torch.nn.Module.named_parameters()`.
"""
named_modules = dict(module.named_modules())
# pylint: disable=protected-access
def update(m, name, param):
del m._parameters[name] # noqa: E501
setattr(m, name, param)
m._parameters[name] = param # noqa: E501
for name, new_param in new_params.items():
if '.' in name:
module_name, param_name = tuple(name.rsplit('.', 1))
if module_name in named_modules:
update(named_modules[module_name], param_name, new_param)
else:
update(module, name, new_param)
|
Load parameters to a module.
This function acts like `torch.nn.Module._load_from_state_dict()`, but
it replaces the tensors in module with those in new_params, while
`_load_from_state_dict()` loads only the values. Use this function so
that the `grad` and `grad_fn` of `new_params` can be restored.
Args:
module (torch.nn.Module): A torch module.
    new_params (dict): A dict of torch tensors used as the new
        parameters of this module. This parameter dict should be
        generated by `torch.nn.Module.named_parameters()`.
|
update_module_params
|
python
|
rlworkgroup/garage
|
src/garage/torch/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/_functions.py
|
MIT
|
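A minimal sketch of why this helper exists: a MAML-style functional update where the adapted tensors keep their grad_fn. It assumes update_module_params above is in scope; the module and the 0.1 learning rate are made up.

import torch
from torch import nn

policy = nn.Linear(3, 2)
loss = policy(torch.randn(5, 3)).sum()
grads = torch.autograd.grad(loss, policy.parameters(), create_graph=True)
adapted = {name: p - 0.1 * g
           for (name, p), g in zip(policy.named_parameters(), grads)}
update_module_params(policy, adapted)
# The module's weight/bias are now plain tensors with a grad_fn, so a later loss
# can still be differentiated with respect to the pre-update parameters.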
def soft_update_model(target_model, source_model, tau):
"""Update model parameter of target and source model.
# noqa: D417
Args:
target_model
(garage.torch.Policy/garage.torch.QFunction):
Target model to update.
source_model
(garage.torch.Policy/QFunction):
Source network to update.
tau (float): Interpolation parameter for doing the
soft target update.
"""
for target_param, param in zip(target_model.parameters(),
source_model.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) +
param.data * tau)
|
Update model parameter of target and source model.
# noqa: D417
Args:
target_model
(garage.torch.Policy/garage.torch.QFunction):
Target model to update.
source_model
(garage.torch.Policy/QFunction):
Source network to update.
tau (float): Interpolation parameter for doing the
soft target update.
|
soft_update_model
|
python
|
rlworkgroup/garage
|
src/garage/torch/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/_functions.py
|
MIT
|
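A short usage sketch of the Polyak update above, assuming soft_update_model is in scope; tau=0.005 is an arbitrary example value.

import copy
from torch import nn

qf = nn.Linear(4, 1)
target_qf = copy.deepcopy(qf)
# ... after optimizing qf for a step ...
soft_update_model(target_qf, qf, tau=0.005)   # target <- 0.995 * target + 0.005 * source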
def set_gpu_mode(mode, gpu_id=0):
"""Set GPU mode and device ID.
Args:
mode (bool): Whether or not to use GPU
gpu_id (int): GPU ID
"""
# pylint: disable=global-statement
global _GPU_ID
global _USE_GPU
global _DEVICE
_GPU_ID = gpu_id
_USE_GPU = mode
_DEVICE = torch.device(('cuda:' + str(_GPU_ID)) if _USE_GPU else 'cpu')
|
Set GPU mode and device ID.
Args:
mode (bool): Whether or not to use GPU
gpu_id (int): GPU ID
|
set_gpu_mode
|
python
|
rlworkgroup/garage
|
src/garage/torch/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/_functions.py
|
MIT
|
def prefer_gpu():
"""Prefer to use GPU(s) if GPU(s) is detected."""
if torch.cuda.is_available():
set_gpu_mode(True)
else:
set_gpu_mode(False)
|
Prefer to use GPU(s) if GPU(s) is detected.
|
prefer_gpu
|
python
|
rlworkgroup/garage
|
src/garage/torch/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/_functions.py
|
MIT
|
def global_device():
"""Returns the global device that torch.Tensors should be placed on.
Note: The global device is set by using the function
    `garage.torch._functions.set_gpu_mode`.
    If this function is never called,
    `garage.torch._functions.global_device()` returns None.
Returns:
`torch.Device`: The global device that newly created torch.Tensors
should be placed on.
"""
# pylint: disable=global-statement
global _DEVICE
return _DEVICE
|
Returns the global device that torch.Tensors should be placed on.
Note: The global device is set by using the function
`garage.torch._functions.set_gpu_mode`.
If this function is never called,
`garage.torch._functions.global_device()` returns None.
Returns:
`torch.Device`: The global device that newly created torch.Tensors
should be placed on.
|
global_device
|
python
|
rlworkgroup/garage
|
src/garage/torch/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/_functions.py
|
MIT
|
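A minimal sketch of how the three device helpers above are meant to be used together.

import torch

prefer_gpu()                        # calls set_gpu_mode(True) iff CUDA is available
device = global_device()            # cuda:0 or cpu, depending on the call above
x = torch.zeros(3, 3).to(device)    # place new tensors on the chosen device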
def product_of_gaussians(mus, sigmas_squared):
"""Compute mu, sigma of product of gaussians.
Args:
mus (torch.Tensor): Means, with shape :math:`(N, M)`. M is the number
of mean values.
sigmas_squared (torch.Tensor): Variances, with shape :math:`(N, V)`. V
is the number of variance values.
Returns:
torch.Tensor: Mu of product of gaussians, with shape :math:`(N, 1)`.
torch.Tensor: Sigma of product of gaussians, with shape :math:`(N, 1)`.
"""
sigmas_squared = torch.clamp(sigmas_squared, min=1e-7)
sigma_squared = 1. / torch.sum(torch.reciprocal(sigmas_squared), dim=0)
mu = sigma_squared * torch.sum(mus / sigmas_squared, dim=0)
return mu, sigma_squared
|
Compute mu, sigma of product of gaussians.
Args:
mus (torch.Tensor): Means, with shape :math:`(N, M)`. M is the number
of mean values.
sigmas_squared (torch.Tensor): Variances, with shape :math:`(N, V)`. V
is the number of variance values.
Returns:
torch.Tensor: Mu of product of gaussians, with shape :math:`(N, 1)`.
torch.Tensor: Sigma of product of gaussians, with shape :math:`(N, 1)`.
|
product_of_gaussians
|
python
|
rlworkgroup/garage
|
src/garage/torch/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/_functions.py
|
MIT
|
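A tiny numeric check of the formula above, assuming product_of_gaussians is in scope.

import torch

mus = torch.tensor([[0.0], [2.0]])              # two Gaussian means
sigmas_squared = torch.tensor([[1.0], [1.0]])   # both with unit variance
mu, var = product_of_gaussians(mus, sigmas_squared)
# mu == tensor([1.]) and var == tensor([0.5]): the product of two unit-variance
# Gaussians is centered at the average mean and has half the variance.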
def state_dict_to(state_dict, device):
"""Move optimizer to a specified device.
Args:
state_dict (dict): state dictionary to be moved
device (str): ID of GPU or CPU.
Returns:
dict: state dictionary moved to device
"""
for param in state_dict.values():
if isinstance(param, torch.Tensor):
param.data = param.data.to(device)
elif isinstance(param, dict):
state_dict_to(param, device)
return state_dict
|
Move optimizer to a specified device.
Args:
state_dict (dict): state dictionary to be moved
device (str): ID of GPU or CPU.
Returns:
dict: state dictionary moved to device
|
state_dict_to
|
python
|
rlworkgroup/garage
|
src/garage/torch/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/_functions.py
|
MIT
|
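A short sketch of moving an optimizer's state with the helper above; only tensor-valued entries are moved, and the 'cpu' target is just an example.

import torch
from torch import nn

model = nn.Linear(4, 2)
optim = torch.optim.Adam(model.parameters())
model(torch.randn(2, 4)).sum().backward()
optim.step()                                    # creates per-parameter state tensors
optim.load_state_dict(state_dict_to(optim.state_dict(), 'cpu'))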
def _value_at_axis(value, axis):
"""Get the value for a particular axis.
Args:
value (tuple or list or int): Possible tuple of per-axis values.
axis (int): Axis to get value for.
Returns:
int: the value at the available axis.
"""
if not isinstance(value, (list, tuple)):
return value
if len(value) == 1:
return value[0]
else:
return value[axis]
|
Get the value for a particular axis.
Args:
value (tuple or list or int): Possible tuple of per-axis values.
axis (int): Axis to get value for.
Returns:
int: the value at the available axis.
|
_value_at_axis
|
python
|
rlworkgroup/garage
|
src/garage/torch/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/_functions.py
|
MIT
|
def output_height_2d(layer, height):
"""Compute the output height of a torch.nn.Conv2d, assuming NCHW format.
This requires knowing the input height. Because NCHW format makes this very
    easy to mix up, this is a separate function from conv2d_output_height.
It also works on torch.nn.MaxPool2d.
This function implements the formula described in the torch.nn.Conv2d
documentation:
https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
Args:
layer (torch.nn.Conv2d): The layer to compute output size for.
height (int): The height of the input image.
Returns:
int: The height of the output image.
"""
assert isinstance(layer, (torch.nn.Conv2d, torch.nn.MaxPool2d))
padding = _value_at_axis(layer.padding, 0)
dilation = _value_at_axis(layer.dilation, 0)
kernel_size = _value_at_axis(layer.kernel_size, 0)
stride = _value_at_axis(layer.stride, 0)
return math.floor((height + 2 * padding - dilation *
(kernel_size - 1) - 1) / stride + 1)
|
Compute the output height of a torch.nn.Conv2d, assuming NCHW format.
This requires knowing the input height. Because NCHW format makes this very
easy to mix up, this is a separate function from conv2d_output_height.
It also works on torch.nn.MaxPool2d.
This function implements the formula described in the torch.nn.Conv2d
documentation:
https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
Args:
layer (torch.nn.Conv2d): The layer to compute output size for.
height (int): The height of the input image.
Returns:
int: The height of the output image.
|
output_height_2d
|
python
|
rlworkgroup/garage
|
src/garage/torch/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/_functions.py
|
MIT
|
def output_width_2d(layer, width):
"""Compute the output width of a torch.nn.Conv2d, assuming NCHW format.
This requires knowing the input width. Because NCHW format makes this very
    easy to mix up, this is a separate function from conv2d_output_height.
It also works on torch.nn.MaxPool2d.
This function implements the formula described in the torch.nn.Conv2d
documentation:
https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
Args:
layer (torch.nn.Conv2d): The layer to compute output size for.
width (int): The width of the input image.
Returns:
int: The width of the output image.
"""
assert isinstance(layer, (torch.nn.Conv2d, torch.nn.MaxPool2d))
padding = _value_at_axis(layer.padding, 1)
dilation = _value_at_axis(layer.dilation, 1)
kernel_size = _value_at_axis(layer.kernel_size, 1)
stride = _value_at_axis(layer.stride, 1)
return math.floor((width + 2 * padding - dilation *
(kernel_size - 1) - 1) / stride + 1)
|
Compute the output width of a torch.nn.Conv2d, assuming NCHW format.
This requires knowing the input width. Because NCHW format makes this very
easy to mix up, this is a separate function from conv2d_output_height.
It also works on torch.nn.MaxPool2d.
This function implements the formula described in the torch.nn.Conv2d
documentation:
https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
Args:
layer (torch.nn.Conv2d): The layer to compute output size for.
width (int): The width of the input image.
Returns:
int: The width of the output image.
|
output_width_2d
|
python
|
rlworkgroup/garage
|
src/garage/torch/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/_functions.py
|
MIT
|
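A small check of the two helpers above against an actual torch.nn.Conv2d; the layer hyperparameters and the 84x84 input are made up.

import torch

conv = torch.nn.Conv2d(in_channels=3, out_channels=16,
                       kernel_size=5, stride=2, padding=1)
h = output_height_2d(conv, 84)      # 41
w = output_width_2d(conv, 84)       # 41
out = conv(torch.zeros(1, 3, 84, 84))
assert out.shape[2:] == (h, w)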
def train(self, trainer):
"""Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer, for services such as
snapshotting and sampler control.
"""
if not self._eval_env:
self._eval_env = trainer.get_env_copy()
for epoch in trainer.step_epochs():
if self._eval_env is not None:
log_performance(epoch,
obtain_evaluation_episodes(
self.learner, self._eval_env),
discount=1.0)
losses = self._train_once(trainer, epoch)
with tabular.prefix(self._name + '/'):
tabular.record('MeanLoss', np.mean(losses))
tabular.record('StdLoss', np.std(losses))
|
Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer, for services such as
snapshotting and sampler control.
|
train
|
python
|
rlworkgroup/garage
|
src/garage/torch/algos/bc.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/bc.py
|
MIT
|
def _train_once(self, trainer, epoch):
"""Obtain samplers and train for one epoch.
Args:
trainer (Trainer): Experiment trainer, which may be used to
obtain samples.
epoch (int): The current epoch.
Returns:
List[float]: Losses.
"""
batch = self._obtain_samples(trainer, epoch)
indices = np.random.permutation(len(batch.actions))
minibatches = np.array_split(indices, self._minibatches_per_epoch)
losses = []
for minibatch in minibatches:
observations = np_to_torch(batch.observations[minibatch])
actions = np_to_torch(batch.actions[minibatch])
self._optimizer.zero_grad()
loss = self._compute_loss(observations, actions)
loss.backward()
losses.append(loss.item())
self._optimizer.step()
return losses
|
Obtain samplers and train for one epoch.
Args:
trainer (Trainer): Experiment trainer, which may be used to
obtain samples.
epoch (int): The current epoch.
Returns:
List[float]: Losses.
|
_train_once
|
python
|
rlworkgroup/garage
|
src/garage/torch/algos/bc.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/bc.py
|
MIT
|
def _obtain_samples(self, trainer, epoch):
"""Obtain samples from self._source.
Args:
trainer (Trainer): Experiment trainer, which may be used to
obtain samples.
epoch (int): The current epoch.
Returns:
TimeStepBatch: Batch of samples.
"""
if isinstance(self._source, Policy):
batch = trainer.obtain_episodes(epoch)
log_performance(epoch, batch, 1.0, prefix='Expert')
return batch
else:
batches = []
while (sum(len(batch.actions)
for batch in batches) < self._batch_size):
batches.append(next(self._source))
return TimeStepBatch.concatenate(*batches)
|
Obtain samples from self._source.
Args:
trainer (Trainer): Experiment trainer, which may be used to
obtain samples.
epoch (int): The current epoch.
Returns:
TimeStepBatch: Batch of samples.
|
_obtain_samples
|
python
|
rlworkgroup/garage
|
src/garage/torch/algos/bc.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/bc.py
|
MIT
|
def _compute_loss(self, observations, expert_actions):
"""Compute loss of self._learner on the expert_actions.
Args:
observations (torch.Tensor): Observations used to select actions.
Has shape :math:`(B, O^*)`, where :math:`B` is the batch
dimension and :math:`O^*` are the observation dimensions.
expert_actions (torch.Tensor): The actions of the expert.
Has shape :math:`(B, A^*)`, where :math:`B` is the batch
dimension and :math:`A^*` are the action dimensions.
Returns:
torch.Tensor: The loss through which gradient can be propagated
back to the learner. Depends on self._loss.
"""
learner_output = self.learner(observations)
if self._loss == 'mse':
if isinstance(learner_output, torch.Tensor):
# We must have a deterministic policy as the learner.
learner_actions = learner_output
else:
# We must have a StochasticPolicy as the learner.
action_dist, _ = learner_output
learner_actions = action_dist.rsample()
return torch.mean((expert_actions - learner_actions)**2)
else:
assert self._loss == 'log_prob'
# We already checked that we have a StochasticPolicy as the learner
action_dist, _ = learner_output
return -torch.mean(action_dist.log_prob(expert_actions))
|
Compute loss of self._learner on the expert_actions.
Args:
observations (torch.Tensor): Observations used to select actions.
Has shape :math:`(B, O^*)`, where :math:`B` is the batch
dimension and :math:`O^*` are the observation dimensions.
expert_actions (torch.Tensor): The actions of the expert.
Has shape :math:`(B, A^*)`, where :math:`B` is the batch
dimension and :math:`A^*` are the action dimensions.
Returns:
torch.Tensor: The loss through which gradient can be propagated
back to the learner. Depends on self._loss.
|
_compute_loss
|
python
|
rlworkgroup/garage
|
src/garage/torch/algos/bc.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/bc.py
|
MIT
|
def train(self, trainer):
"""Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer.
Returns:
float: The average return in last epoch cycle.
"""
if not self._eval_env:
self._eval_env = trainer.get_env_copy()
last_returns = [float('nan')]
trainer.enable_logging = False
for _ in trainer.step_epochs():
for cycle in range(self._steps_per_epoch):
trainer.step_episode = trainer.obtain_episodes(
trainer.step_itr)
if hasattr(self.exploration_policy, 'update'):
self.exploration_policy.update(trainer.step_episode)
self.train_once(trainer.step_itr, trainer.step_episode)
if (cycle == 0 and self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
trainer.enable_logging = True
eval_eps = obtain_evaluation_episodes(
self.policy, self._eval_env)
last_returns = log_performance(trainer.step_itr,
eval_eps,
discount=self._discount)
trainer.step_itr += 1
return np.mean(last_returns)
|
Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer.
Returns:
float: The average return in last epoch cycle.
|
train
|
python
|
rlworkgroup/garage
|
src/garage/torch/algos/ddpg.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/ddpg.py
|
MIT
|
def train_once(self, itr, episodes):
"""Perform one iteration of training.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
"""
self.replay_buffer.add_episode_batch(episodes)
epoch = itr / self._steps_per_epoch
for _ in range(self._n_train_steps):
if (self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
samples = self.replay_buffer.sample_transitions(
self._buffer_batch_size)
samples['rewards'] *= self._reward_scale
qf_loss, y, q, policy_loss = torch_to_np(
self.optimize_policy(samples))
self._episode_policy_losses.append(policy_loss)
self._episode_qf_losses.append(qf_loss)
self._epoch_ys.append(y)
self._epoch_qs.append(q)
if itr % self._steps_per_epoch == 0:
logger.log('Training finished')
if (self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
tabular.record('Epoch', epoch)
tabular.record('Policy/AveragePolicyLoss',
np.mean(self._episode_policy_losses))
tabular.record('QFunction/AverageQFunctionLoss',
np.mean(self._episode_qf_losses))
tabular.record('QFunction/AverageQ', np.mean(self._epoch_qs))
tabular.record('QFunction/MaxQ', np.max(self._epoch_qs))
tabular.record('QFunction/AverageAbsQ',
np.mean(np.abs(self._epoch_qs)))
tabular.record('QFunction/AverageY', np.mean(self._epoch_ys))
tabular.record('QFunction/MaxY', np.max(self._epoch_ys))
tabular.record('QFunction/AverageAbsY',
np.mean(np.abs(self._epoch_ys)))
|
Perform one iteration of training.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
|
train_once
|
python
|
rlworkgroup/garage
|
src/garage/torch/algos/ddpg.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/ddpg.py
|
MIT
|
def optimize_policy(self, samples_data):
"""Perform algorithm optimizing.
Args:
samples_data (dict): Processed batch data.
Returns:
action_loss: Loss of action predicted by the policy network.
qval_loss: Loss of Q-value predicted by the Q-network.
ys: y_s.
qval: Q-value predicted by the Q-network.
"""
transitions = as_torch_dict(samples_data)
observations = transitions['observations']
rewards = transitions['rewards'].reshape(-1, 1)
actions = transitions['actions']
next_observations = transitions['next_observations']
terminals = transitions['terminals'].reshape(-1, 1)
next_inputs = next_observations
inputs = observations
with torch.no_grad():
next_actions = self._target_policy(next_inputs)
target_qvals = self._target_qf(next_inputs, next_actions)
clip_range = (-self._clip_return,
0. if self._clip_pos_returns else self._clip_return)
y_target = rewards + (1.0 - terminals) * self._discount * target_qvals
y_target = torch.clamp(y_target, clip_range[0], clip_range[1])
# optimize critic
qval = self._qf(inputs, actions)
qf_loss = torch.nn.MSELoss()
qval_loss = qf_loss(qval, y_target)
zero_optim_grads(self._qf_optimizer)
qval_loss.backward()
self._qf_optimizer.step()
# optimize actor
actions = self.policy(inputs)
action_loss = -1 * self._qf(inputs, actions).mean()
zero_optim_grads(self._policy_optimizer)
action_loss.backward()
self._policy_optimizer.step()
# update target networks
self.update_target()
return (qval_loss.detach(), y_target, qval.detach(),
action_loss.detach())
|
Perform algorithm optimization.
Args:
samples_data (dict): Processed batch data.
Returns:
action_loss: Loss of action predicted by the policy network.
qval_loss: Loss of Q-value predicted by the Q-network.
ys: y_s.
qval: Q-value predicted by the Q-network.
|
optimize_policy
|
python
|
rlworkgroup/garage
|
src/garage/torch/algos/ddpg.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/ddpg.py
|
MIT
|
def update_target(self):
"""Update parameters in the target policy and Q-value network."""
for t_param, param in zip(self._target_qf.parameters(),
self._qf.parameters()):
t_param.data.copy_(t_param.data * (1.0 - self._tau) +
param.data * self._tau)
for t_param, param in zip(self._target_policy.parameters(),
self.policy.parameters()):
t_param.data.copy_(t_param.data * (1.0 - self._tau) +
param.data * self._tau)
|
Update parameters in the target policy and Q-value network.
|
update_target
|
python
|
rlworkgroup/garage
|
src/garage/torch/algos/ddpg.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/ddpg.py
|
MIT
|
def train(self, trainer):
"""Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer.
Returns:
float: The average return in last epoch cycle.
"""
if not self._eval_env:
self._eval_env = trainer.get_env_copy()
last_returns = [float('nan')]
if self._min_buffer_size > self.replay_buffer.n_transitions_stored:
num_warmup_steps = (self._min_buffer_size -
self.replay_buffer.n_transitions_stored)
self.replay_buffer.add_episode_batch(
trainer.obtain_episodes(0, num_warmup_steps))
trainer.enable_logging = True
for _ in trainer.step_epochs():
if (self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
logger.log('Evaluating policy')
params_before = self.exploration_policy.get_param_values()
eval_eps = obtain_evaluation_episodes(
(self.exploration_policy
if not self._deterministic_eval else self.policy),
self._eval_env,
num_eps=self._num_eval_episodes,
max_episode_length=self._max_episode_length_eval)
self.exploration_policy.set_param_values(params_before)
last_returns = log_performance(trainer.step_itr,
eval_eps,
discount=self._discount)
self._episode_reward_mean.extend(last_returns)
tabular.record('Evaluation/100EpRewardMean',
np.mean(self._episode_reward_mean))
for _ in range(self._steps_per_epoch):
trainer.step_episode = trainer.obtain_episodes(
trainer.step_itr)
if hasattr(self.exploration_policy, 'update'):
self.exploration_policy.update(trainer.step_episode)
self._train_once(trainer.step_itr, trainer.step_episode)
trainer.step_itr += 1
return np.mean(last_returns)
|
Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer.
Returns:
float: The average return in last epoch cycle.
|
train
|
python
|
rlworkgroup/garage
|
src/garage/torch/algos/dqn.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/dqn.py
|
MIT
|
def _train_once(self, itr, episodes):
"""Perform one iteration of training.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
"""
self.replay_buffer.add_episode_batch(episodes)
epoch = itr / self._steps_per_epoch
for _ in range(self._n_train_steps):
if (self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
timesteps = self.replay_buffer.sample_timesteps(
self._buffer_batch_size)
qf_loss, y, q = tuple(v.cpu().numpy()
for v in self._optimize_qf(timesteps))
self._episode_qf_losses.append(qf_loss)
self._epoch_ys.append(y)
self._epoch_qs.append(q)
if itr % self._steps_per_epoch == 0:
self._log_eval_results(epoch)
if itr % self._target_update_freq == 0:
self._target_qf = copy.deepcopy(self._qf)
|
Perform one iteration of training.
Args:
itr (int): Iteration number.
episodes (EpisodeBatch): Batch of episodes.
|
_train_once
|
python
|
rlworkgroup/garage
|
src/garage/torch/algos/dqn.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/dqn.py
|
MIT
|
def _log_eval_results(self, epoch):
"""Log evaluation results after an epoch.
Args:
epoch (int): Current epoch.
"""
logger.log('Training finished')
if self.replay_buffer.n_transitions_stored >= self._min_buffer_size:
tabular.record('Epoch', epoch)
tabular.record('QFunction/AverageQFunctionLoss',
np.mean(self._episode_qf_losses))
tabular.record('QFunction/AverageQ', np.mean(self._epoch_qs))
tabular.record('QFunction/MaxQ', np.max(self._epoch_qs))
tabular.record('QFunction/AverageAbsQ',
np.mean(np.abs(self._epoch_qs)))
tabular.record('QFunction/AverageY', np.mean(self._epoch_ys))
tabular.record('QFunction/MaxY', np.max(self._epoch_ys))
tabular.record('QFunction/AverageAbsY',
np.mean(np.abs(self._epoch_ys)))
|
Log evaluation results after an epoch.
Args:
epoch (int): Current epoch.
|
_log_eval_results
|
python
|
rlworkgroup/garage
|
src/garage/torch/algos/dqn.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/dqn.py
|
MIT
|
def _optimize_qf(self, timesteps):
"""Perform algorithm optimizing.
Args:
timesteps (TimeStepBatch): Processed batch data.
Returns:
qval_loss: Loss of Q-value predicted by the Q-network.
ys: y_s.
qval: Q-value predicted by the Q-network.
"""
observations = np_to_torch(timesteps.observations)
rewards = np_to_torch(timesteps.rewards).reshape(-1, 1)
rewards *= self._reward_scale
actions = np_to_torch(timesteps.actions)
next_observations = np_to_torch(timesteps.next_observations)
terminals = np_to_torch(timesteps.terminals).reshape(-1, 1)
next_inputs = next_observations
inputs = observations
with torch.no_grad():
if self._double_q:
# Use online qf to get optimal actions
selected_actions = torch.argmax(self._qf(next_inputs), axis=1)
# use target qf to get Q values for those actions
selected_actions = selected_actions.long().unsqueeze(1)
best_qvals = torch.gather(self._target_qf(next_inputs),
dim=1,
index=selected_actions)
else:
target_qvals = self._target_qf(next_inputs)
best_qvals, _ = torch.max(target_qvals, 1)
best_qvals = best_qvals.unsqueeze(1)
rewards_clipped = rewards
if self._clip_reward is not None:
rewards_clipped = torch.clamp(rewards, -1 * self._clip_reward,
self._clip_reward)
y_target = (rewards_clipped +
(1.0 - terminals) * self._discount * best_qvals)
y_target = y_target.squeeze(1)
# optimize qf
qvals = self._qf(inputs)
selected_qs = torch.sum(qvals * actions, axis=1)
qval_loss = F.smooth_l1_loss(selected_qs, y_target)
zero_optim_grads(self._qf_optimizer)
qval_loss.backward()
# optionally clip the gradients
if self._clip_grad is not None:
torch.nn.utils.clip_grad_norm_(self.policy.parameters(),
self._clip_grad)
self._qf_optimizer.step()
return (qval_loss.detach(), y_target, selected_qs.detach())
|
Perform algorithm optimization.
Args:
timesteps (TimeStepBatch): Processed batch data.
Returns:
qval_loss: Loss of Q-value predicted by the Q-network.
ys: y_s.
qval: Q-value predicted by the Q-network.
|
_optimize_qf
|
python
|
rlworkgroup/garage
|
src/garage/torch/algos/dqn.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/dqn.py
|
MIT
|
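A toy illustration of the double-Q target selection used in the `_double_q` branch above; the Q-values are made up and the two tensors stand in for Q(s', .) from the online and target networks.

import torch

q_online = torch.tensor([[1.0, 3.0], [2.0, 0.5]])
q_target = torch.tensor([[0.9, 2.5], [1.8, 0.4]])
selected = torch.argmax(q_online, dim=1, keepdim=True)        # online net picks the action
best_qvals = torch.gather(q_target, dim=1, index=selected)    # target net scores it
# best_qvals == tensor([[2.5], [1.8]])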
def to(self, device=None):
"""Put all the networks within the model on device.
Args:
device (str): ID of GPU or CPU.
"""
if device is None:
device = global_device()
logger.log('Using device: ' + str(device))
self._qf = self._qf.to(device)
self._target_qf = self._target_qf.to(device)
|
Put all the networks within the model on device.
Args:
device (str): ID of GPU or CPU.
|
to
|
python
|
rlworkgroup/garage
|
src/garage/torch/algos/dqn.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/dqn.py
|
MIT
|
def train(self, trainer):
"""Obtain samples and start training for each epoch.
Args:
trainer (Trainer): Gives the algorithm access to
:method:`~Trainer.step_epochs()`, which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
last_return = None
for _ in trainer.step_epochs():
all_samples, all_params = self._obtain_samples(trainer)
last_return = self._train_once(trainer, all_samples, all_params)
trainer.step_itr += 1
return last_return
|
Obtain samples and start training for each epoch.
Args:
trainer (Trainer): Gives the algorithm access to
:method:`~Trainer.step_epochs()`, which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
|
train
|
python
|
rlworkgroup/garage
|
src/garage/torch/algos/maml.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/maml.py
|
MIT
|
def _train_once(self, trainer, all_samples, all_params):
"""Train the algorithm once.
Args:
trainer (Trainer): The experiment runner.
all_samples (list[list[_MAMLEpisodeBatch]]): A two
dimensional list of _MAMLEpisodeBatch of size
[meta_batch_size * (num_grad_updates + 1)]
all_params (list[dict]): A list of named parameter dictionaries.
Each dictionary contains key value pair of names (str) and
parameters (torch.Tensor).
Returns:
float: Average return.
"""
itr = trainer.step_itr
old_theta = dict(self._policy.named_parameters())
kl_before = self._compute_kl_constraint(all_samples,
all_params,
set_grad=False)
meta_objective = self._compute_meta_loss(all_samples, all_params)
zero_optim_grads(self._meta_optimizer)
meta_objective.backward()
self._meta_optimize(all_samples, all_params)
# Log
loss_after = self._compute_meta_loss(all_samples,
all_params,
set_grad=False)
kl_after = self._compute_kl_constraint(all_samples,
all_params,
set_grad=False)
with torch.no_grad():
policy_entropy = self._compute_policy_entropy(
[task_samples[0] for task_samples in all_samples])
average_return = self._log_performance(
itr, all_samples, meta_objective.item(), loss_after.item(),
kl_before.item(), kl_after.item(),
policy_entropy.mean().item())
if self._meta_evaluator and itr % self._evaluate_every_n_epochs == 0:
self._meta_evaluator.evaluate(self)
update_module_params(self._old_policy, old_theta)
return average_return
|
Train the algorithm once.
Args:
trainer (Trainer): The experiment runner.
all_samples (list[list[_MAMLEpisodeBatch]]): A two
dimensional list of _MAMLEpisodeBatch of size
[meta_batch_size * (num_grad_updates + 1)]
all_params (list[dict]): A list of named parameter dictionaries.
Each dictionary contains key value pair of names (str) and
parameters (torch.Tensor).
Returns:
float: Average return.
|
_train_once
|
python
|
rlworkgroup/garage
|
src/garage/torch/algos/maml.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/maml.py
|
MIT
|
def _train_value_function(self, paths):
"""Train the value function.
Args:
paths (list[dict]): A list of collected paths.
Returns:
torch.Tensor: Calculated mean scalar value of value function loss
(float).
"""
# MAML resets a value function to its initial state before training.
self._value_function.load_state_dict(self._initial_vf_state)
obs = np.concatenate([path['observations'] for path in paths], axis=0)
returns = np.concatenate([path['returns'] for path in paths])
obs = np_to_torch(obs)
returns = np_to_torch(returns.astype(np.float32))
vf_loss = self._value_function.compute_loss(obs, returns)
# pylint: disable=protected-access
zero_optim_grads(self._inner_algo._vf_optimizer._optimizer)
vf_loss.backward()
# pylint: disable=protected-access
self._inner_algo._vf_optimizer.step()
return vf_loss
|
Train the value function.
Args:
paths (list[dict]): A list of collected paths.
Returns:
torch.Tensor: Calculated mean scalar value of value function loss
(float).
|
_train_value_function
|
python
|
rlworkgroup/garage
|
src/garage/torch/algos/maml.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/maml.py
|
MIT
|
def _obtain_samples(self, trainer):
"""Obtain samples for each task before and after the fast-adaptation.
Args:
trainer (Trainer): A trainer instance to obtain samples.
Returns:
tuple: Tuple of (all_samples, all_params).
all_samples (list[_MAMLEpisodeBatch]): A list of size
[meta_batch_size * (num_grad_updates + 1)]
all_params (list[dict]): A list of named parameter
dictionaries.
"""
tasks = self._task_sampler.sample(self._meta_batch_size)
all_samples = [[] for _ in range(len(tasks))]
all_params = []
theta = dict(self._policy.named_parameters())
for i, env_up in enumerate(tasks):
for j in range(self._num_grad_updates + 1):
episodes = trainer.obtain_episodes(trainer.step_itr,
env_update=env_up)
batch_samples = self._process_samples(episodes)
all_samples[i].append(batch_samples)
# The last iteration does only sampling but no adapting
if j < self._num_grad_updates:
                # A grad needs to be kept for the next grad update,
                # except for the last grad update
require_grad = j < self._num_grad_updates - 1
self._adapt(batch_samples, set_grad=require_grad)
all_params.append(dict(self._policy.named_parameters()))
# Restore to pre-updated policy
update_module_params(self._policy, theta)
return all_samples, all_params
|
Obtain samples for each task before and after the fast-adaptation.
Args:
trainer (Trainer): A trainer instance to obtain samples.
Returns:
tuple: Tuple of (all_samples, all_params).
all_samples (list[_MAMLEpisodeBatch]): A list of size
[meta_batch_size * (num_grad_updates + 1)]
all_params (list[dict]): A list of named parameter
dictionaries.
|
_obtain_samples
|
python
|
rlworkgroup/garage
|
src/garage/torch/algos/maml.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/maml.py
|
MIT
|
def _adapt(self, batch_samples, set_grad=True):
"""Performs one MAML inner step to update the policy.
Args:
batch_samples (_MAMLEpisodeBatch): Samples data for one
task and one gradient step.
set_grad (bool): if False, update policy parameters in-place.
Else, allow taking gradient of functions of updated parameters
with respect to pre-updated parameters.
"""
# pylint: disable=protected-access
loss = self._inner_algo._compute_loss(*batch_samples[1:])
# Update policy parameters with one SGD step
self._inner_optimizer.set_grads_none()
loss.backward(create_graph=set_grad)
with torch.set_grad_enabled(set_grad):
self._inner_optimizer.step()
|
Performs one MAML inner step to update the policy.
Args:
batch_samples (_MAMLEpisodeBatch): Samples data for one
task and one gradient step.
set_grad (bool): if False, update policy parameters in-place.
Else, allow taking gradient of functions of updated parameters
with respect to pre-updated parameters.
|
_adapt
|
python
|
rlworkgroup/garage
|
src/garage/torch/algos/maml.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/maml.py
|
MIT
|
def _compute_meta_loss(self, all_samples, all_params, set_grad=True):
"""Compute loss to meta-optimize.
Args:
all_samples (list[list[_MAMLEpisodeBatch]]): A two
dimensional list of _MAMLEpisodeBatch of size
[meta_batch_size * (num_grad_updates + 1)]
all_params (list[dict]): A list of named parameter dictionaries.
Each dictionary contains key value pair of names (str) and
parameters (torch.Tensor).
set_grad (bool): Whether to enable gradient calculation or not.
Returns:
torch.Tensor: Calculated mean value of loss.
"""
theta = dict(self._policy.named_parameters())
old_theta = dict(self._old_policy.named_parameters())
losses = []
for task_samples, task_params in zip(all_samples, all_params):
for i in range(self._num_grad_updates):
require_grad = i < self._num_grad_updates - 1 or set_grad
self._adapt(task_samples[i], set_grad=require_grad)
update_module_params(self._old_policy, task_params)
with torch.set_grad_enabled(set_grad):
# pylint: disable=protected-access
last_update = task_samples[-1]
loss = self._inner_algo._compute_loss(*last_update[1:])
losses.append(loss)
update_module_params(self._policy, theta)
update_module_params(self._old_policy, old_theta)
return torch.stack(losses).mean()
|
Compute loss to meta-optimize.
Args:
all_samples (list[list[_MAMLEpisodeBatch]]): A two
dimensional list of _MAMLEpisodeBatch of size
[meta_batch_size * (num_grad_updates + 1)]
all_params (list[dict]): A list of named parameter dictionaries.
Each dictionary contains key value pair of names (str) and
parameters (torch.Tensor).
set_grad (bool): Whether to enable gradient calculation or not.
Returns:
torch.Tensor: Calculated mean value of loss.
|
_compute_meta_loss
|
python
|
rlworkgroup/garage
|
src/garage/torch/algos/maml.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/maml.py
|
MIT
|