code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def get_action(self, observation):
r"""Get a single action given an observation.
Args:
observation (np.ndarray): Observation from the environment.
Shape is :math:`env_spec.observation_space`.
Returns:
tuple:
* np.ndarray: Predicted action. Shape is
:math:`env_spec.action_space`.
* dict:
* np.ndarray[float]: Mean of the distribution
* np.ndarray[float]: Log standard deviation of the distribution.
"""
if not isinstance(observation, np.ndarray) and not isinstance(
observation, torch.Tensor):
observation = self._env_spec.observation_space.flatten(observation)
elif isinstance(observation,
np.ndarray) and len(observation.shape) > 1:
observation = self._env_spec.observation_space.flatten(observation)
elif isinstance(observation,
torch.Tensor) and len(observation.shape) > 1:
observation = torch.flatten(observation)
with torch.no_grad():
if isinstance(observation, np.ndarray):
observation = np_to_torch(observation)
if not isinstance(observation, torch.Tensor):
observation = list_to_tensor(observation)
observation = observation.unsqueeze(0)
action, agent_infos = self.get_actions(observation)
return action[0], {k: v[0] for k, v in agent_infos.items()}
|
Get a single action given an observation.
Args:
observation (np.ndarray): Observation from the environment.
Shape is :math:`env_spec.observation_space`.
Returns:
tuple:
* np.ndarray: Predicted action. Shape is
:math:`env_spec.action_space`.
* dict:
* np.ndarray[float]: Mean of the distribution
* np.ndarray[float]: Log standard deviation of the distribution.
|
get_action
|
python
|
rlworkgroup/garage
|
src/garage/torch/policies/stochastic_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/stochastic_policy.py
|
MIT
|
def get_actions(self, observations):
r"""Get actions given observations.
Args:
observations (np.ndarray): Observations from the environment.
Shape is :math:`batch_dim \bullet env_spec.observation_space`.
Returns:
tuple:
* np.ndarray: Predicted actions.
:math:`batch_dim \bullet env_spec.action_space`.
* dict:
* np.ndarray[float]: Mean of the distribution.
* np.ndarray[float]: Log standard deviation of the distribution.
"""
if not isinstance(observations[0], np.ndarray) and not isinstance(
observations[0], torch.Tensor):
observations = self._env_spec.observation_space.flatten_n(
observations)
# frequently users like to pass lists of torch tensors or lists of
# numpy arrays. This handles those conversions.
if isinstance(observations, list):
if isinstance(observations[0], np.ndarray):
observations = np.stack(observations)
elif isinstance(observations[0], torch.Tensor):
observations = torch.stack(observations)
if isinstance(observations[0],
np.ndarray) and len(observations[0].shape) > 1:
observations = self._env_spec.observation_space.flatten_n(
observations)
elif isinstance(observations[0],
torch.Tensor) and len(observations[0].shape) > 1:
observations = torch.flatten(observations, start_dim=1)
with torch.no_grad():
if isinstance(observations, np.ndarray):
observations = np_to_torch(observations)
if not isinstance(observations, torch.Tensor):
observations = list_to_tensor(observations)
if isinstance(self._env_spec.observation_space, akro.Image):
observations /= 255.0 # scale image
dist, info = self.forward(observations)
return dist.sample().cpu().numpy(), {
k: v.detach().cpu().numpy()
for (k, v) in info.items()
}
|
Get actions given observations.
Args:
observations (np.ndarray): Observations from the environment.
Shape is :math:`batch_dim \bullet env_spec.observation_space`.
Returns:
tuple:
* np.ndarray: Predicted actions.
:math:`batch_dim \bullet env_spec.action_space`.
* dict:
* np.ndarray[float]: Mean of the distribution.
* np.ndarray[float]: Log standard deviation of the distribution.
|
get_actions
|
python
|
rlworkgroup/garage
|
src/garage/torch/policies/stochastic_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/stochastic_policy.py
|
MIT
|
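The two policy methods above share one contract: forward() returns a torch distribution plus a dict of info tensors, and get_action()/get_actions() sample from it and convert everything to numpy. A minimal, self-contained sketch of that contract (toy_forward and the shapes below are illustrative assumptions, not garage API):

import numpy as np
import torch
from torch.distributions import Normal

def toy_forward(observations):
    # Stand-in for a policy module: map a batch of observations to a Normal.
    mean = observations.mean(dim=-1, keepdim=True)
    std = torch.full_like(mean, 0.5)
    return Normal(mean, std), dict(mean=mean, log_std=std.log())

obs = torch.randn(4, 3)  # batch of 4 flattened observations
with torch.no_grad():
    dist, info = toy_forward(obs)
    actions = dist.sample().cpu().numpy()  # shape (4, 1)
    agent_infos = {k: v.detach().cpu().numpy() for k, v in info.items()}
print(actions.shape, sorted(agent_infos))  # (4, 1) ['log_std', 'mean']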
def forward(self, observations):
"""Compute the action distributions from the observations.
Args:
observations (torch.Tensor): Batch of observations on default
torch device.
Returns:
torch.distributions.Distribution: Batch distribution of actions.
dict[str, torch.Tensor]: Additional agent_info, as torch Tensors.
Do not need to be detached, and can be on any device.
"""
|
Compute the action distributions from the observations.
Args:
observations (torch.Tensor): Batch of observations on default
torch device.
Returns:
torch.distributions.Distribution: Batch distribution of actions.
dict[str, torch.Tensor]: Additional agent_info, as torch Tensors.
Do not need to be detached, and can be on any device.
|
forward
|
python
|
rlworkgroup/garage
|
src/garage/torch/policies/stochastic_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/stochastic_policy.py
|
MIT
|
def forward(self, observations):
"""Compute the action distributions from the observations.
Args:
observations (torch.Tensor): Batch of observations on default
torch device.
Returns:
torch.distributions.Distribution: Batch distribution of actions.
dict[str, torch.Tensor]: Additional agent_info, as torch Tensors
"""
dist = self._module(observations)
ret_mean = dist.mean.cpu()
ret_log_std = (dist.variance.sqrt()).log().cpu()
return dist, dict(mean=ret_mean, log_std=ret_log_std)
|
Compute the action distributions from the observations.
Args:
observations (torch.Tensor): Batch of observations on default
torch device.
Returns:
torch.distributions.Distribution: Batch distribution of actions.
dict[str, torch.Tensor]: Additional agent_info, as torch Tensors
|
forward
|
python
|
rlworkgroup/garage
|
src/garage/torch/policies/tanh_gaussian_mlp_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/tanh_gaussian_mlp_policy.py
|
MIT
|
def __init__(self, env_spec, **kwargs):
"""Initialize class with multiple attributes.
Args:
env_spec (EnvSpec): Environment specification.
**kwargs: Keyword arguments.
"""
self._env_spec = env_spec
self._obs_dim = env_spec.observation_space.flat_dim
self._action_dim = env_spec.action_space.flat_dim
MLPModule.__init__(self,
input_dim=self._obs_dim + self._action_dim,
output_dim=1,
**kwargs)
|
Initialize class with multiple attributes.
Args:
env_spec (EnvSpec): Environment specification.
**kwargs: Keyword arguments.
|
__init__
|
python
|
rlworkgroup/garage
|
src/garage/torch/q_functions/continuous_mlp_q_function.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/q_functions/continuous_mlp_q_function.py
|
MIT
|
def forward(self, observations):
"""Return Q-value(s).
Args:
observations (np.ndarray): observations of shape :math:`(N, O*)`.
Returns:
torch.Tensor: Output value
"""
# We're given flattened observations.
observations = observations.reshape(
-1, *self._env_spec.observation_space.shape)
return self._cnn_module(observations)
|
Return Q-value(s).
Args:
observations (np.ndarray): observations of shape :math:`(N, O*)`.
Returns:
torch.Tensor: Output value
|
forward
|
python
|
rlworkgroup/garage
|
src/garage/torch/q_functions/discrete_cnn_q_function.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/q_functions/discrete_cnn_q_function.py
|
MIT
|
def forward(self, observations):
"""Return Q-value(s).
Args:
observations (np.ndarray): observations of shape :math:`(N, O*)`.
Returns:
torch.Tensor: Output value
"""
# We're given flattened observations.
observations = observations.reshape(
-1, *self._env_spec.observation_space.shape)
out = self._module(observations)
val = self._val(out)
act = self._act(out)
act = act - act.mean(1).unsqueeze(1)
return val + act
|
Return Q-value(s).
Args:
observations (np.ndarray): observations of shape :math:`(N, O*)`.
Returns:
torch.Tensor: Output value
|
forward
|
python
|
rlworkgroup/garage
|
src/garage/torch/q_functions/discrete_dueling_cnn_q_function.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/q_functions/discrete_dueling_cnn_q_function.py
|
MIT
|
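The dueling forward pass above recombines a state value with mean-centered advantages. As a sketch of the standard dueling aggregation it implements (notation ours, not from the source):

Q(s, a) = V(s) + \left( A(s, a) - \frac{1}{|\mathcal{A}|} \sum_{a'} A(s, a') \right)

Here val plays the role of V(s), act the role of A(s, a), and act.mean(1).unsqueeze(1) is the averaged advantage; centering the advantages keeps the value/advantage split identifiable.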
def compute_loss(self, obs, returns):
r"""Compute mean value of loss.
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N \cdot [T], O*)`.
returns (torch.Tensor): Acquired returns with shape :math:`(N, )`.
Returns:
torch.Tensor: Calculated negative mean scalar value of
objective (float).
"""
dist = self.module(obs)
ll = dist.log_prob(returns.reshape(-1, 1))
loss = -ll.mean()
return loss
|
Compute mean value of loss.
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N \cdot [T], O*)`.
returns (torch.Tensor): Acquired returns with shape :math:`(N, )`.
Returns:
torch.Tensor: Calculated negative mean scalar value of
objective (float).
|
compute_loss
|
python
|
rlworkgroup/garage
|
src/garage/torch/value_functions/gaussian_mlp_value_function.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/value_functions/gaussian_mlp_value_function.py
|
MIT
|
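compute_loss above is a negative log-likelihood objective: the value-function module produces a distribution over returns, and the loss is the negated mean log-probability. As a sketch (the exact notation is ours, not from the source):

\mathcal{L}(\theta) = -\frac{1}{N \cdot [T]} \sum_{i} \log p_\theta\left(R_i \mid o_i\right)

where p_\theta is the Gaussian returned by self.module(obs), o_i an observation, and R_i the corresponding return.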
def compute_loss(self, obs, returns):
r"""Compute mean value of loss.
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N \cdot [T], O*)`.
returns (torch.Tensor): Acquired returns with shape :math:`(N, )`.
Returns:
torch.Tensor: Calculated negative mean scalar value of
objective (float).
"""
|
Compute mean value of loss.
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N \cdot [T], O*)`.
returns (torch.Tensor): Acquired returns with shape :math:`(N, )`.
Returns:
torch.Tensor: Calculated negative mean scalar value of
objective (float).
|
compute_loss
|
python
|
rlworkgroup/garage
|
src/garage/torch/value_functions/value_function.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/value_functions/value_function.py
|
MIT
|
def step_env(env, n=10, visualize=True):
"""Step env helper.
Args:
env (Environment): Input environment.
n (int): Steps.
visualize (bool): Whether to visualize the environment.
"""
env.reset()
if visualize and issubclass(type(env), Environment):
env.visualize()
for _ in range(n):
print('itr:', _)
es = env.step(env.action_space.sample())
if es.last:
break
|
Step env helper.
Args:
env (Environment): Input environment.
n (int): Steps.
visualize (bool): Whether to visualize the environment.
|
step_env
|
python
|
rlworkgroup/garage
|
tests/helpers.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/helpers.py
|
MIT
|
def step_env_with_gym_quirks(env,
spec,
n=10,
visualize=True,
serialize_env=False):
"""Step env helper.
Args:
env (Environment): Input environment.
spec (EnvSpec): The environment specification.
n (int): Steps.
visualize (bool): Whether to visualize the environment.
serialize_env (bool): Whether to serialize the environment.
"""
if serialize_env:
# Roundtrip serialization
round_trip = cloudpickle.loads(cloudpickle.dumps(env))
assert round_trip.spec == env.spec
env = round_trip
env.reset()
for _ in range(n):
es = env.step(env.action_space.sample())
if visualize:
if spec.id not in KNOWN_GYM_RENDER_NOT_IMPLEMENTED:
env.visualize()
else:
with pytest.raises(NotImplementedError):
env.visualize()
if es.last:
break
env.close()
|
Step env helper.
Args:
env (Environment): Input environment.
spec (EnvSpec): The environment specification.
n (int): Steps.
visualize (bool): Whether to visualize the environment.
serialize_env (bool): Whether to serialize the environment.
|
step_env_with_gym_quirks
|
python
|
rlworkgroup/garage
|
tests/helpers.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/helpers.py
|
MIT
|
def convolve(_input, filter_weights, filter_bias, strides, filters,
in_channels, hidden_nonlinearity):
"""Helper function for performing convolution.
Args:
_input (tf.Tensor): Input tf.Tensor to the CNN.
filter_weights (tuple(tf.Tensor)): The weights of the filters.
filter_bias (tuple(tf.Tensor)): The bias of the filters.
strides (tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and dimension
of filters. For example, ((3, (3, 5)), (32, (3, 3))) means there
are two convolutional layers. The filter for the first layer has 3
channels and its shape is (3 x 5), while the filter for the second
layer has 32 channels and its shape is (3 x 3).
in_channels (tuple[int]): The number of input channels.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
Return:
tf.Tensor: The output of the convolution.
"""
batch_size = _input.shape[0]
in_width = _input.shape[1]
in_height = _input.shape[2]
for filter_iter, in_shape, filter_weight, _filter_bias, stride in zip(
filters, in_channels, filter_weights, filter_bias, strides):
filter_width = filter_iter[1][1]
filter_height = filter_iter[1][0]
out_width = int((in_width - filter_width) / stride) + 1
out_height = int((in_height - filter_height) / stride) + 1
flatten_filter_size = filter_width * filter_height * in_shape
reshape_filter = filter_weight.reshape(flatten_filter_size, -1)
image_vector = np.empty(
(batch_size, out_width, out_height, flatten_filter_size))
for batch in range(batch_size):
for w in range(out_width):
for h in range(out_height):
image_vector[batch][w][h] = _construct_image_vector(
_input, batch, w, h, filter_width, filter_height,
in_shape)
_input = np.dot(image_vector, reshape_filter) + _filter_bias
_input = hidden_nonlinearity(_input).eval()
in_width = out_width
in_height = out_height
return _input
|
Helper function for performing convolution.
Args:
_input (tf.Tensor): Input tf.Tensor to the CNN.
filter_weights (tuple(tf.Tensor)): The weights of the filters.
filter_bias (tuple(tf.Tensor)): The bias of the filters.
strides (tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and dimension
of filters. For example, ((3, (3, 5)), (32, (3, 3))) means there
are two convolutional layers. The filter for the first layer has 3
channels and its shape is (3 x 5), while the filter for the second
layer has 32 channels and its shape is (3 x 3).
in_channels (tuple[int]): The number of input channels.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
Return:
tf.Tensor: The output of the convolution.
|
convolve
|
python
|
rlworkgroup/garage
|
tests/helpers.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/helpers.py
|
MIT
|
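The loop in convolve above computes each layer's output size with the 'VALID'-padding rule; for reference (standard formula, not stated in the source):

W_\text{out} = \left\lfloor \frac{W_\text{in} - W_\text{filter}}{\text{stride}} \right\rfloor + 1, \qquad H_\text{out} = \left\lfloor \frac{H_\text{in} - H_\text{filter}}{\text{stride}} \right\rfloor + 1

which matches the out_width and out_height assignments in the code.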
def recurrent_step_lstm(input_val,
num_units,
step_hidden,
step_cell,
w_x_init,
w_h_init,
b_init,
nonlinearity,
gate_nonlinearity,
forget_bias=1.0):
"""Helper function for performing feedforward of a lstm cell.
Args:
input_val (tf.Tensor): Input placeholder.
num_units (int): Hidden dimension for LSTM cell.
step_hidden (tf.Tensor): Placeholder for step hidden state.
step_cell (tf.Tensor): Placeholder for step cell state.
nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
w_x_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
gate_nonlinearity (callable): Activation function for recurrent
layers. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
w_h_init (callable): Initializer function for the weight
of recurrent layer(s). The function should return a
tf.Tensor.
forget_bias (float): Bias to be added to the forget gate at
initialization. It's used to reduce the scale of forgetting at the
beginning of the training.
Returns:
tf.Tensor: Final hidden state after feedforward.
tf.Tensor: Final cell state after feedforward.
Note:
Incoming gate: i(t) = f_i(x(t) @ W_xi + h(t-1) @ W_hi +
w_ci * c(t-1) + b_i)
Forget gate: f(t) = f_f(x(t) @ W_xf + h(t-1) @ W_hf +
w_cf * c(t-1) + b_f)
Cell gate: c(t) = f(t) * c(t - 1) + i(t) * f_c(x(t) @ W_xc +
h(t-1) @ W_hc + b_c)
Out gate: o(t) = f_o(x(t) @ W_xo + h(t-1) @ W_ho +
w_co * c(t) + b_o)
New hidden state: h(t) = o(t) * f_h(c(t))
Incoming, forget, cell, and out vectors must have the same
dimension as the hidden state.
"""
def f(x):
"""Linear function.
Args:
x (float): Input variable.
Returns:
float: Output variable.
"""
return x
if nonlinearity is None:
nonlinearity = f
if gate_nonlinearity is None:
gate_nonlinearity = f
input_dim = np.prod(input_val.shape[1:])
# Weights for the input gate
w_xi = np.full((input_dim, num_units), w_x_init)
w_hi = np.full((num_units, num_units), w_h_init)
b_i = np.full((num_units, ), b_init)
# Weights for the forget gate
w_xf = np.full((input_dim, num_units), w_x_init)
w_hf = np.full((num_units, num_units), w_h_init)
b_f = np.full((num_units, ), b_init)
# Weights for the cell gate
w_xc = np.full((input_dim, num_units), w_x_init)
w_hc = np.full((num_units, num_units), w_h_init)
b_c = np.full((num_units, ), b_init)
# Weights for the out gate
w_xo = np.full((input_dim, num_units), w_x_init)
w_ho = np.full((num_units, num_units), w_h_init)
b_o = np.full((num_units, ), b_init)
w_x_ifco = np.concatenate([w_xi, w_xf, w_xc, w_xo], axis=1)
w_h_ifco = np.concatenate([w_hi, w_hf, w_hc, w_ho], axis=1)
x_ifco = np.matmul(input_val, w_x_ifco)
h_ifco = np.matmul(step_hidden, w_h_ifco)
x_i = x_ifco[:, :num_units]
x_f = x_ifco[:, num_units:num_units * 2]
x_c = x_ifco[:, num_units * 2:num_units * 3]
x_o = x_ifco[:, num_units * 3:num_units * 4]
h_i = h_ifco[:, :num_units]
h_f = h_ifco[:, num_units:num_units * 2]
h_c = h_ifco[:, num_units * 2:num_units * 3]
h_o = h_ifco[:, num_units * 3:num_units * 4]
i = gate_nonlinearity(x_i + h_i + b_i)
f = gate_nonlinearity(x_f + h_f + b_f + forget_bias)
o = gate_nonlinearity(x_o + h_o + b_o)
c = f * step_cell + i * nonlinearity(x_c + h_c + b_c)
h = o * nonlinearity(c)
return h, c
|
Helper function for performing feedforward of an LSTM cell.
Args:
input_val (tf.Tensor): Input placeholder.
num_units (int): Hidden dimension for LSTM cell.
step_hidden (tf.Tensor): Placeholder for step hidden state.
step_cell (tf.Tensor): Placeholder for step cell state.
nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
w_x_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
gate_nonlinearity (callable): Activation function for recurrent
layers. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
w_h_init (callable): Initializer function for the weight
of recurrent layer(s). The function should return a
tf.Tensor.
forget_bias (float): Bias to be added to the forget gate at
initialization. It's used to reduce the scale of forgetting at the
beginning of the training.
Returns:
tf.Tensor: Final hidden state after feedforward.
tf.Tensor: Final cell state after feedforward.
Note:
Incoming gate: i(t) = f_i(x(t) @ W_xi + h(t-1) @ W_hi +
w_ci * c(t-1) + b_i)
Forget gate: f(t) = f_f(x(t) @ W_xf + h(t-1) @ W_hf +
w_cf * c(t-1) + b_f)
Cell gate: c(t) = f(t) * c(t - 1) + i(t) * f_c(x(t) @ W_xc +
h(t-1) @ W_hc + b_c)
Out gate: o(t) = f_o(x(t) @ W_xo + h(t-1) @ W_ho +
w_co * c(t) + b_o)
New hidden state: h(t) = o(t) * f_h(c(t))
Incoming, forget, cell, and out vectors must have the same
dimension as the hidden state.
|
recurrent_step_lstm
|
python
|
rlworkgroup/garage
|
tests/helpers.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/helpers.py
|
MIT
|
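A minimal usage sketch of the LSTM step helper above, assuming it is importable from tests/helpers.py; the batch size, dimensions, constant initializers, and the sigmoid lambda are illustrative assumptions:

import numpy as np

batch, input_dim, num_units = 2, 3, 4
x = np.ones((batch, input_dim))    # one timestep of input
h0 = np.zeros((batch, num_units))  # previous hidden state
c0 = np.zeros((batch, num_units))  # previous cell state

h1, c1 = recurrent_step_lstm(x, num_units, h0, c0,
                             w_x_init=0.1, w_h_init=0.1, b_init=0.0,
                             nonlinearity=np.tanh,
                             gate_nonlinearity=lambda v: 1.0 / (1.0 + np.exp(-v)))
print(h1.shape, c1.shape)          # (2, 4) (2, 4)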
def recurrent_step_gru(input_val,
num_units,
step_hidden,
w_x_init,
w_h_init,
b_init,
nonlinearity,
gate_nonlinearity,
forget_bias=1.0):
"""Helper function for performing feedforward of a GRU cell.
Args:
input_val (tf.Tensor): Input placeholder.
num_units (int): Hidden dimension for GRU cell.
step_hidden (tf.Tensor): Placeholder for step hidden state.
nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
w_x_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
gate_nonlinearity (callable): Activation function for recurrent
layers. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
w_h_init (callable): Initializer function for the weight
of recurrent layer(s). The function should return a
tf.Tensor.
forget_bias (float): Bias to be added to the forget gate at
initialization. It's used to reduce the scale of forgetting at the
beginning of the training.
Returns:
tf.Tensor: Final hidden state after feedforward.
Note:
Reset gate: r(t) = f_r(x(t) @ W_xr + h(t-1) @ W_hr + b_r)
Update gate: u(t) = f_u(x(t) @ W_xu + h(t-1) @ W_hu + b_u)
Cell gate: c(t) = f_c(x(t) @ W_xc + r(t) *
(h(t-1) @ W_hc) + b_c)
New hidden state: h(t) = u(t) * h(t-1) + (1 - u(t)) * c(t)
The reset, update, and cell vectors must have the same dimension
as the hidden state.
"""
def f(x):
"""Linear function.
Args:
x (float): Input variable.
Returns:
float: Output variable.
"""
return x
del forget_bias
if nonlinearity is None:
nonlinearity = f
if gate_nonlinearity is None:
gate_nonlinearity = f
input_dim = np.prod(input_val.shape[1:])
# Weights for the update gate
w_xz = np.full((input_dim, num_units), w_x_init)
w_hz = np.full((num_units, num_units), w_h_init)
b_z = np.full((num_units, ), b_init)
# Weights for the reset gate
w_xr = np.full((input_dim, num_units), w_x_init)
w_hr = np.full((num_units, num_units), w_h_init)
b_r = np.full((num_units, ), b_init)
# Weights for the hidden gate
w_xh = np.full((input_dim, num_units), w_x_init)
w_hh = np.full((num_units, num_units), w_h_init)
b_h = np.full((num_units, ), b_init)
w_x_zrh = np.concatenate([w_xz, w_xr, w_xh], axis=1)
w_h_zrh = np.concatenate([w_hz, w_hr, w_hh], axis=1)
x_zrh = np.matmul(input_val, w_x_zrh)
h_zrh = np.matmul(step_hidden, w_h_zrh)
x_z = x_zrh[:, :num_units]
x_r = x_zrh[:, num_units:num_units * 2]
x_h = x_zrh[:, num_units * 2:num_units * 3]
h_z = h_zrh[:, :num_units]
h_r = h_zrh[:, num_units:num_units * 2]
h_h = h_zrh[:, num_units * 2:num_units * 3]
z = gate_nonlinearity(x_z + h_z + b_z)
r = gate_nonlinearity(x_r + h_r + b_r)
hh = nonlinearity(x_h + r * h_h + b_h)
h = z * step_hidden + (1 - z) * hh
return h
|
Helper function for performing feedforward of a GRU cell.
Args:
input_val (tf.Tensor): Input placeholder.
num_units (int): Hidden dimension for GRU cell.
step_hidden (tf.Tensor): Placeholder for step hidden state.
nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
w_x_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
gate_nonlinearity (callable): Activation function for recurrent
layers. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
w_h_init (callable): Initializer function for the weight
of recurrent layer(s). The function should return a
tf.Tensor.
forget_bias (float): Bias to be added to the forget gate at
initialization. It's used to reduce the scale of forgetting at the
beginning of the training.
Returns:
tf.Tensor: Final hidden state after feedforward.
Note:
Reset gate: r(t) = f_r(x(t) @ W_xr + h(t-1) @ W_hr + b_r)
Update gate: u(t) = f_u(x(t) @ W_xu + h(t-1) @ W_hu + b_u)
Cell gate: c(t) = f_c(x(t) @ W_xc + r(t) *
(h(t-1) @ W_hc) + b_c)
New hidden state: h(t) = u(t) * h(t-1) + (1 - u(t)) * c(t)
The reset, update, and cell vectors must have the same dimension
as the hidden state.
|
recurrent_step_gru
|
python
|
rlworkgroup/garage
|
tests/helpers.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/helpers.py
|
MIT
|
def _construct_image_vector(_input, batch, w, h, filter_width, filter_height,
in_shape):
"""Get sliding window of input image.
Args:
_input (tf.Tensor): Input tf.Tensor to the CNN.
batch (int): Batch index.
w (int): Width index.
h (int): Height index.
filter_width (int): Width of the filter.
filter_height (int): Height of the filter.
in_shape (int): The number of input channels.
Return:
np.array: The output of the sliding window.
"""
sw = np.empty((filter_width, filter_height, in_shape))
for dw in range(filter_width):
for dh in range(filter_height):
for in_c in range(in_shape):
sw[dw][dh][in_c] = _input[batch][w + dw][h + dh][in_c]
return sw.flatten()
|
Get sliding window of input image.
Args:
_input (tf.Tensor): Input tf.Tensor to the CNN.
batch (int): Batch index.
w (int): Width index.
h (int): Height index.
filter_width (int): Width of the filter.
filter_height (int): Height of the filter.
in_shape (int): The number of input channels.
Return:
np.array: The output of the sliding window.
|
_construct_image_vector
|
python
|
rlworkgroup/garage
|
tests/helpers.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/helpers.py
|
MIT
|
def max_pooling(_input, pool_shape, pool_stride, padding='VALID'):
"""Helper function for performing max pooling.
Args:
_input (tf.Tensor): Input tf.Tensor to the CNN.
pool_shape (int): Dimension of the pooling layer.
pool_stride (int): The stride of the pooling layer.
padding (str): The type of padding algorithm to use, either 'SAME'
or 'VALID'.
Return:
tf.Tensor: The output tf.Tensor after max pooling.
"""
batch_size = _input.shape[0]
if padding == 'VALID':
height_size = int((_input.shape[1] - pool_shape) / pool_stride) + 1
width_size = int((_input.shape[2] - pool_shape) / pool_stride) + 1
else:
height_size = int((_input.shape[1] + pool_stride - 1) / pool_stride)
width_size = int((_input.shape[2] + pool_stride - 1) / pool_stride)
# max pooling
results = np.empty((batch_size, height_size, width_size, _input.shape[3]))
for b in range(batch_size):
for i in range(0, results.shape[1]):
for j in range(0, results.shape[2]):
for k in range(_input.shape[3]):
row = i * pool_shape
col = j * pool_shape
results[b][i][j][k] = np.max(_input[b,
row:row + pool_shape,
col:col + # noqa: W504
pool_shape, k])
return results
|
Helper function for performing max pooling.
Args:
_input (tf.Tensor): Input tf.Tensor to the CNN.
pool_shape (int): Dimension of the pooling layer.
pool_stride (int): The stride of the pooling layer.
padding (str): The type of padding algorithm to use, either 'SAME'
or 'VALID'.
Return:
tf.Tensor: The output tf.Tensor after max pooling.
|
max_pooling
|
python
|
rlworkgroup/garage
|
tests/helpers.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/helpers.py
|
MIT
|
def __init__(self, env=None, env_name='', max_episode_length=100):
"""Create an AutoStepEnv.
Args:
env (gym.Env): Environment to be wrapped.
env_name (str): Name of the environment.
max_episode_length (int): Maximum length of the episode.
"""
if env_name:
super().__init__(gym.make(env_name))
else:
super().__init__(env)
self._episode_step = 0
self._max_episode_length = max_episode_length
|
Create an AutoStepEnv.
Args:
env (gym.Env): Environment to be wrapped.
env_name (str): Name of the environment.
max_episode_length (int): Maximum length of the episode.
|
__init__
|
python
|
rlworkgroup/garage
|
tests/wrappers.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/wrappers.py
|
MIT
|
def step(self, action):
"""Step the wrapped environment.
Args:
action (np.ndarray): the action.
Returns:
np.ndarray: Next observation
float: Reward
bool: Termination signal
dict: Environment information
"""
self._episode_step += 1
next_obs, reward, done, info = self.env.step(action)
if self._episode_step == self._max_episode_length:
done = True
self._episode_step = 0
return next_obs, reward, done, info
|
Step the wrapped environment.
Args:
action (np.ndarray): the action.
Returns:
np.ndarray: Next observation
float: Reward
bool: Termination signal
dict: Environment information
|
step
|
python
|
rlworkgroup/garage
|
tests/wrappers.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/wrappers.py
|
MIT
|
def setup_method(self):
"""Setup the Session and default Graph."""
self.graph = tf.Graph()
for c in self.graph.collections:
self.graph.clear_collection(c)
self.graph_manager = self.graph.as_default()
self.graph_manager.__enter__()
self.sess = tf.compat.v1.Session(graph=self.graph)
self.sess_manager = self.sess.as_default()
self.sess_manager.__enter__()
self.sess.__enter__()
logger.add_output(NullOutput())
deterministic.set_seed(1)
|
Setup the Session and default Graph.
|
setup_method
|
python
|
rlworkgroup/garage
|
tests/fixtures/fixtures.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/fixtures.py
|
MIT
|
def teardown_method(self):
"""Teardown the Session and default Graph."""
logger.remove_all()
self.sess.__exit__(None, None, None)
self.sess_manager.__exit__(None, None, None)
self.graph_manager.__exit__(None, None, None)
self.sess.close()
# These del are crucial to prevent ENOMEM in the CI
# b/c TensorFlow does not release memory explicitly
del self.graph
del self.sess
gc.collect()
|
Teardown the Session and default Graph.
|
teardown_method
|
python
|
rlworkgroup/garage
|
tests/fixtures/fixtures.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/fixtures.py
|
MIT
|
def train(self, trainer):
"""Obtain samplers and start actual training for each epoch.
See garage.np.algos.RLAlgorithm train().
Args:
trainer (Trainer): Trainer is passed to give algorithm
the access to trainer.step_epochs(), which provides services
such as snapshotting and sampler control.
"""
|
Obtain samplers and start actual training for each epoch.
See garage.np.algos.RLAlgorithm train().
Args:
trainer (Trainer): Trainer is passed to give algorithm
the access to trainer.step_epochs(), which provides services
such as snapshotting and sampler control.
|
train
|
python
|
rlworkgroup/garage
|
tests/fixtures/algos/dummy_algo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/algos/dummy_algo.py
|
MIT
|
def init_opt(self):
"""Initialize the optimization procedure.
If using tensorflow, this may include declaring all the variables and
compiling functions.
"""
|
Initialize the optimization procedure.
If using tensorflow, this may include declaring all the variables and
compiling functions.
|
init_opt
|
python
|
rlworkgroup/garage
|
tests/fixtures/algos/dummy_tf_algo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/algos/dummy_tf_algo.py
|
MIT
|
def optimize_policy(self, samples_data):
"""Optimize the policy using the samples.
Args:
samples_data (dict): Processed sample data.
"""
|
Optimize the policy using the samples.
Args:
samples_data (dict): Processed sample data.
|
optimize_policy
|
python
|
rlworkgroup/garage
|
tests/fixtures/algos/dummy_tf_algo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/algos/dummy_tf_algo.py
|
MIT
|
def render(self, mode='human'):
"""Render.
Args:
mode (str): Render mode.
"""
|
Render.
Args:
mode (str): Render mode.
|
render
|
python
|
rlworkgroup/garage
|
tests/fixtures/envs/dummy/base.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/envs/dummy/base.py
|
MIT
|
def action_space(self):
"""Return an action space.
Returns:
gym.spaces: The action space of the environment.
"""
return akro.Box(low=-5.0,
high=5.0,
shape=self._action_dim,
dtype=np.float32)
|
Return an action space.
Returns:
gym.spaces: The action space of the environment.
|
action_space
|
python
|
rlworkgroup/garage
|
tests/fixtures/envs/dummy/dummy_box_env.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/envs/dummy/dummy_box_env.py
|
MIT
|
def observation_space(self):
"""Return the observation space.
Returns:
akro.Dict: Observation space.
"""
if self.obs_space_type == 'box':
return gym.spaces.Dict({
'achieved_goal':
gym.spaces.Box(low=-200.,
high=200.,
shape=(3, ),
dtype=np.float32),
'desired_goal':
gym.spaces.Box(low=-200.,
high=200.,
shape=(3, ),
dtype=np.float32),
'observation':
gym.spaces.Box(low=-200.,
high=200.,
shape=(25, ),
dtype=np.float32)
})
elif self.obs_space_type == 'image':
return gym.spaces.Dict({
'dummy':
gym.spaces.Box(low=0,
high=255,
shape=(100, 100, 3),
dtype=np.uint8),
})
else:
return gym.spaces.Dict({'dummy': gym.spaces.Discrete(5)})
|
Return the observation space.
Returns:
akro.Dict: Observation space.
|
observation_space
|
python
|
rlworkgroup/garage
|
tests/fixtures/envs/dummy/dummy_dict_env.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/envs/dummy/dummy_dict_env.py
|
MIT
|
def action_space(self):
"""Return the action space.
Returns:
akro.Box: Action space.
"""
if self.act_space_type == 'box':
return akro.Box(low=-5.0, high=5.0, shape=(1, ), dtype=np.float32)
else:
return akro.Discrete(5)
|
Return the action space.
Returns:
akro.Box: Action space.
|
action_space
|
python
|
rlworkgroup/garage
|
tests/fixtures/envs/dummy/dummy_dict_env.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/envs/dummy/dummy_dict_env.py
|
MIT
|
def compute_reward(self, achieved_goal, goal, info):
"""Function to compute new reward.
Args:
achieved_goal (numpy.ndarray): Achieved goal.
goal (numpy.ndarray): Original desired goal.
info (dict): Extra information.
Returns:
float: New computed reward.
"""
del info
return np.sum(achieved_goal - goal)
|
Function to compute new reward.
Args:
achieved_goal (numpy.ndarray): Achieved goal.
goal (numpy.ndarray): Original desired goal.
info (dict): Extra information.
Returns:
float: New computed reward.
|
compute_reward
|
python
|
rlworkgroup/garage
|
tests/fixtures/envs/dummy/dummy_dict_env.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/envs/dummy/dummy_dict_env.py
|
MIT
|
def reset(self):
"""Reset the environment.
Returns:
np.ndarray: Environment state.
"""
self.state = np.ones(self._obs_dim, dtype=np.uint8)
self._lives = 5
self.step_called = 0
return self.state
|
Reset the environment.
Returns:
np.ndarray: Environment state.
|
reset
|
python
|
rlworkgroup/garage
|
tests/fixtures/envs/dummy/dummy_discrete_pixel_env.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/envs/dummy/dummy_discrete_pixel_env.py
|
MIT
|
def step(self, action):
"""Step the environment.
Before gym fixed the overflow issue for sample() in
np.uint8 environments, we handle the sampling here.
We need high=256 since np.random.uniform samples from [low, high)
(includes low, but excludes high).
Args:
action (int): Action.
Returns:
np.ndarray: observation.
float: reward.
bool: terminal signal.
dict: extra environment info.
Raises:
RuntimeError: If step() is called when no lives are left or before reset().
"""
done = False
if self.state is not None:
# Simulating FIRE action
if action == 1:
if self._prev_action == 2:
done = True
obs = np.full(self._obs_dim, 2, dtype=np.uint8)
else:
if self.random:
obs = np.random.uniform(low=0,
high=256,
size=self._obs_dim).astype(
np.uint8)
else:
obs = self.state + action
if self._lives == 0:
raise RuntimeError('DummyEnv: Cannot step when lives = 0!')
self._lives -= 1
if self._lives == 0:
done = True
else:
raise RuntimeError(
'DummyEnv: reset() must be called before step()!')
self.step_called += 1
self._prev_action = action
return obs, 0, done, {'ale.lives': self._lives}
|
Step the environment.
Before gym fixed the overflow issue for sample() in
np.uint8 environments, we handle the sampling here.
We need high=256 since np.random.uniform samples from [low, high)
(includes low, but excludes high).
Args:
action (int): Action.
Returns:
np.ndarray: observation.
float: reward.
bool: terminal signal.
dict: extra environment info.
Raises:
RuntimeError: If step() is called when no lives are left or before reset().
|
step
|
python
|
rlworkgroup/garage
|
tests/fixtures/envs/dummy/dummy_discrete_pixel_env.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/envs/dummy/dummy_discrete_pixel_env.py
|
MIT
|
def __init__(self, frames):
"""
LazyFrames class from baselines.
OpenAI Baselines uses this class for its FrameStack environment
wrapper. It is used for testing garage.envs.wrappers.AtariEnv.
garage.envs.wrappers.AtariEnv is used when algorithms are trained
using Baselines wrappers, e.g. during benchmarking.
"""
self._frames = frames
self._out = None
|
LazyFrames class from baselines.
OpenAI Baselines uses this class for its FrameStack environment
wrapper. It is used for testing garage.envs.wrappers.AtariEnv.
garage.envs.wrappers.AtariEnv is used when algorithms are trained
using Baselines wrappers, e.g. during benchmarking.
|
__init__
|
python
|
rlworkgroup/garage
|
tests/fixtures/envs/dummy/dummy_discrete_pixel_env_baselines.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/envs/dummy/dummy_discrete_pixel_env_baselines.py
|
MIT
|
def fixture_exp(snapshot_config, sess):
"""Dummy fixture experiment function.
Args:
snapshot_config (garage.experiment.SnapshotConfig): The snapshot
configuration used by Trainer to create the snapshotter.
If None, it will create one with default settings.
sess (tf.Session): An optional TensorFlow session.
A new session will be created immediately if not provided.
Returns:
np.ndarray: Values of the parameters evaluated in
the current session
"""
with TFTrainer(snapshot_config=snapshot_config, sess=sess) as trainer:
env = GymEnv('CartPole-v1', max_episode_length=100)
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(8, 8))
baseline = LinearFeatureBaseline(env_spec=env.spec)
sampler = LocalSampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length)
algo = VPG(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
optimizer_args=dict(learning_rate=0.01, ))
trainer.setup(algo, env)
trainer.train(n_epochs=5, batch_size=100)
return policy.get_param_values()
|
Dummy fixture experiment function.
Args:
snapshot_config (garage.experiment.SnapshotConfig): The snapshot
configuration used by Trainer to create the snapshotter.
If None, it will create one with default settings.
sess (tf.Session): An optional TensorFlow session.
A new session will be created immediately if not provided.
Returns:
np.ndarray: Values of the parameters evaluated in
the current session
|
fixture_exp
|
python
|
rlworkgroup/garage
|
tests/fixtures/experiment/fixture_experiment.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/experiment/fixture_experiment.py
|
MIT
|
def network_output_spec(self):
"""Network output spec.
Returns:
list[str]: Name of the model outputs, in order.
"""
return [
'all_output', 'step_output', 'step_hidden', 'init_hidden', 'dist'
]
|
Network output spec.
Returns:
list[str]: Name of the model outputs, in order.
|
network_output_spec
|
python
|
rlworkgroup/garage
|
tests/fixtures/models/simple_categorical_gru_model.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_categorical_gru_model.py
|
MIT
|
def _build(self, obs_input, step_obs_input, step_hidden, name=None):
"""Build model.
Args:
obs_input (tf.Tensor): Entire time-series observation input.
step_obs_input (tf.Tensor): Single timestep observation input.
step_hidden (tf.Tensor): Hidden state for step.
name (str): Name of the model, also the name scope.
Returns:
tf.Tensor: Entire time-series outputs.
tf.Tensor: Step output.
tf.Tensor: Step hidden state.
tf.Tensor: Initial hidden state.
tfp.distributions.OneHotCategorical: Distribution.
"""
outputs, output, step_hidden, hidden_init_var = super()._build(
obs_input, step_obs_input, step_hidden, name)
dist = tfp.distributions.OneHotCategorical(outputs)
return outputs, output, step_hidden, hidden_init_var, dist
|
Build model.
Args:
obs_input (tf.Tensor): Entire time-series observation input.
step_obs_input (tf.Tensor): Single timestep observation input.
step_hidden (tf.Tensor): Hidden state for step.
name (str): Name of the model, also the name scope.
Returns:
tf.Tensor: Entire time-series outputs.
tf.Tensor: Step output.
tf.Tensor: Step hidden state.
tf.Tensor: Initial hidden state.
tfp.distributions.OneHotCategorical: Distribution.
|
_build
|
python
|
rlworkgroup/garage
|
tests/fixtures/models/simple_categorical_gru_model.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_categorical_gru_model.py
|
MIT
|
def network_output_spec(self):
"""Network output spec.
Returns:
list[str]: Name of the model outputs, in order.
"""
return [
'all_output', 'step_output', 'step_hidden', 'step_cell',
'init_hidden', 'init_cell', 'dist'
]
|
Network output spec.
Returns:
list[str]: Name of the model outputs, in order.
|
network_output_spec
|
python
|
rlworkgroup/garage
|
tests/fixtures/models/simple_categorical_lstm_model.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_categorical_lstm_model.py
|
MIT
|
def _build(self,
obs_input,
step_obs_input,
step_hidden,
step_cell,
name=None):
"""Build model.
Args:
obs_input (tf.Tensor): Entire time-series observation input.
step_obs_input (tf.Tensor): Single timestep observation input.
step_hidden (tf.Tensor): Hidden state for step.
step_cell (tf.Tensor): Cell state for step.
name (str): Name of the model, also the name scope.
Returns:
tf.Tensor: Entire time-series outputs.
tf.Tensor: Step output.
tf.Tensor: Step hidden state.
tf.Tensor: Step cell state.
tf.Tensor: Initial hidden state.
tf.Tensor: Initial cell state.
tfp.distributions.OneHotCategorical: Distribution.
"""
(outputs, output, step_hidden, step_cell, hidden_init_var,
cell_init_var) = super()._build(obs_input, step_obs_input,
step_hidden, step_cell, name)
dist = tfp.distributions.OneHotCategorical(outputs)
return (outputs, output, step_hidden, step_cell, hidden_init_var,
cell_init_var, dist)
|
Build model.
Args:
obs_input (tf.Tensor): Entire time-series observation input.
step_obs_input (tf.Tensor): Single timestep observation input.
step_hidden (tf.Tensor): Hidden state for step.
step_cell (tf.Tensor): Cell state for step.
name (str): Name of the model, also the name scope.
Returns:
tf.Tensor: Entire time-series outputs.
tf.Tensor: Step output.
tf.Tensor: Step hidden state.
tf.Tensor: Step cell state.
tf.Tensor: Initial hidden state.
tf.Tensor: Initial cell state.
tfp.distributions.OneHotCategorical: Distribution.
|
_build
|
python
|
rlworkgroup/garage
|
tests/fixtures/models/simple_categorical_lstm_model.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_categorical_lstm_model.py
|
MIT
|
def _build(self, obs_input, name=None):
"""Build model.
Args:
obs_input (tf.Tensor): Observation inputs.
name (str): Name of the model, also the name scope.
Returns:
tf.Tensor: Network outputs.
tfp.distributions.OneHotCategorical: Distribution.
"""
prob = super()._build(obs_input, name)
dist = tfp.distributions.OneHotCategorical(prob)
return prob, dist
|
Build model.
Args:
obs_input (tf.Tensor): Observation inputs.
name (str): Name of the model, also the name scope.
Returns:
tf.Tensor: Network outputs.
tfp.distributions.OneHotCategorical: Distribution.
|
_build
|
python
|
rlworkgroup/garage
|
tests/fixtures/models/simple_categorical_mlp_model.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_categorical_mlp_model.py
|
MIT
|
def _build(self, obs_input, name=None):
"""Build model given input placeholder(s).
Args:
obs_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if it exists. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
del name
height_size = self.input_dim[0]
width_size = self.input_dim[1]
for filter_iter, stride in zip(self.filters, self.strides):
if self.padding == 'SAME':
height_size = int((height_size + stride - 1) / stride)
width_size = int((width_size + stride - 1) / stride)
else:
height_size = int(
(height_size - filter_iter[1][0]) / stride) + 1
width_size = int((width_size - filter_iter[1][1]) / stride) + 1
flatten_shape = height_size * width_size * self.filters[-1][0]
return_var = tf.compat.v1.get_variable(
'return_var', (), initializer=tf.constant_initializer(0.5))
return tf.fill((tf.shape(obs_input)[0], flatten_shape), return_var)
|
Build model given input placeholder(s).
Args:
obs_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if it exists. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
|
_build
|
python
|
rlworkgroup/garage
|
tests/fixtures/models/simple_cnn_model.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_cnn_model.py
|
MIT
|
def _build(self, obs_input, name=None):
"""Build model given input placeholder(s).
Args:
obs_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if it exists. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
del name
height_size = self.input_dim[0]
width_size = self.input_dim[1]
for filter_iter, stride in zip(self.filters, self.strides):
if self.padding == 'SAME':
height_size = int((height_size + stride - 1) / stride)
width_size = int((width_size + stride - 1) / stride)
new_height = height_size + self.pool_strides[0] - 1
height_size = int(new_height / self.pool_strides[0])
new_width = width_size + self.pool_strides[1] - 1
width_size = int(new_width / self.pool_strides[1])
else:
height_size = int(
(height_size - filter_iter[1][0]) / stride) + 1
width_size = int((width_size - filter_iter[1][1]) / stride) + 1
new_height = height_size - self.pool_shapes[0]
height_size = int(new_height / self.pool_strides[0]) + 1
new_width = width_size - self.pool_shapes[0]
width_size = int(new_width / self.pool_strides[1]) + 1
flatten_shape = height_size * width_size * self.filters[-1][0]
return_var = tf.compat.v1.get_variable(
'return_var', (), initializer=tf.constant_initializer(0.5))
return tf.fill((tf.shape(obs_input)[0], flatten_shape), return_var)
|
Build model given input placeholder(s).
Args:
obs_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if it exists. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
|
_build
|
python
|
rlworkgroup/garage
|
tests/fixtures/models/simple_cnn_model_with_max_pooling.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_cnn_model_with_max_pooling.py
|
MIT
|
def _build(self, obs_input, step_obs_input, step_hidden, name=None):
"""Build model given input placeholder(s).
Args:
obs_input (tf.Tensor): Placeholder for entire time-series
inputs.
step_obs_input (tf.Tensor): Placeholder for step inputs.
step_hidden (tf.Tensor): Placeholder for step hidden state.
name (str): Inner model name, also the variable scope of the
inner model, if it exists. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Entire time-series outputs.
tf.Tensor: Step output.
tf.Tensor: Step hidden state.
tf.Tensor: Initial hidden state.
"""
del name
return_var = tf.compat.v1.get_variable(
'return_var', (), initializer=tf.constant_initializer(0.5))
outputs = tf.fill(
(tf.shape(obs_input)[0], tf.shape(obs_input)[1], self.output_dim),
return_var)
output = tf.fill((tf.shape(step_obs_input)[0], self.output_dim),
return_var)
hidden_init_var = tf.compat.v1.get_variable(
name='initial_hidden',
shape=(self.hidden_dim, ),
initializer=tf.zeros_initializer(),
trainable=False,
dtype=tf.float32)
return outputs, output, step_hidden, hidden_init_var
|
Build model given input placeholder(s).
Args:
obs_input (tf.Tensor): Placeholder for entire time-series
inputs.
step_obs_input (tf.Tensor): Placeholder for step inputs.
step_hidden (tf.Tensor): Placeholder for step hidden state.
name (str): Inner model name, also the variable scope of the
inner model, if it exists. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Entire time-series outputs.
tf.Tensor: Step output.
tf.Tensor: Step hidden state.
tf.Tensor: Initial hidden state.
|
_build
|
python
|
rlworkgroup/garage
|
tests/fixtures/models/simple_gru_model.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_gru_model.py
|
MIT
|
def network_input_spec(self):
"""Network input spec.
Return:
list[str]: List of key(str) for the network inputs.
"""
return [
'full_input', 'step_input', 'step_hidden_input', 'step_cell_input'
]
|
Network input spec.
Return:
list[str]: List of key(str) for the network inputs.
|
network_input_spec
|
python
|
rlworkgroup/garage
|
tests/fixtures/models/simple_lstm_model.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_lstm_model.py
|
MIT
|
def network_output_spec(self):
"""Network output spec.
Return:
list[str]: List of key(str) for the network outputs.
"""
return [
'all_output', 'step_output', 'step_hidden', 'step_cell',
'init_hidden', 'init_cell'
]
|
Network output spec.
Return:
list[str]: List of key(str) for the network outputs.
|
network_output_spec
|
python
|
rlworkgroup/garage
|
tests/fixtures/models/simple_lstm_model.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_lstm_model.py
|
MIT
|
def _build(self,
obs_input,
step_obs_input,
step_hidden,
step_cell,
name=None):
"""Build model given input placeholder(s).
Args:
obs_input (tf.Tensor): Placeholder for entire time-series
inputs.
step_obs_input (tf.Tensor): Placeholder for step inputs.
step_hidden (tf.Tensor): Placeholder for step hidden state.
step_cell (tf.Tensor): Placeholder for step cell state.
name (str): Inner model name, also the variable scope of the
inner model, if it exists. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Entire time-series outputs.
tf.Tensor: Step output.
tf.Tensor: Step hidden state.
tf.Tensor: Step cell state.
tf.Tensor: Initial hidden state.
tf.Tensor: Initial cell state.
"""
del name
return_var = tf.compat.v1.get_variable(
'return_var', (), initializer=tf.constant_initializer(0.5))
outputs = tf.fill(
(tf.shape(obs_input)[0], tf.shape(obs_input)[1], self.output_dim),
return_var)
output = tf.fill((tf.shape(step_obs_input)[0], self.output_dim),
return_var)
hidden_init_var = tf.compat.v1.get_variable(
name='initial_hidden',
shape=(self.hidden_dim, ),
initializer=tf.zeros_initializer(),
trainable=False,
dtype=tf.float32)
cell_init_var = tf.compat.v1.get_variable(
name='initial_cell',
shape=(self.hidden_dim, ),
initializer=tf.zeros_initializer(),
trainable=False,
dtype=tf.float32)
return (outputs, output, step_hidden, step_cell, hidden_init_var,
cell_init_var)
|
Build model given input placeholder(s).
Args:
obs_input (tf.Tensor): Placeholder for entire time-series
inputs.
step_obs_input (tf.Tensor): Placeholder for step inputs.
step_hidden (tf.Tensor): Placeholder for step hidden state.
step_cell (tf.Tensor): Placeholder for step cell state.
name (str): Inner model name, also the variable scope of the
inner model, if it exists. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Entire time-series outputs.
tf.Tensor: Step output.
tf.Tensor: Step hidden state.
tf.Tensor: Step cell state.
tf.Tensor: Initial hidden state.
tf.Tensor: Initial cell state.
|
_build
|
python
|
rlworkgroup/garage
|
tests/fixtures/models/simple_lstm_model.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_lstm_model.py
|
MIT
|
def _build(self, obs_input, act_input, name=None):
"""Build model given input placeholder(s).
Args:
obs_input (tf.Tensor): Tensor input for state.
act_input (tf.Tensor): Tensor input for action.
name (str): Inner model name, also the variable scope of the
inner model, if it exists. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
del name
del act_input
return_var = tf.compat.v1.get_variable(
'return_var', (), initializer=tf.constant_initializer(0.5))
return tf.fill((tf.shape(obs_input)[0], self.output_dim), return_var)
|
Build model given input placeholder(s).
Args:
obs_input (tf.Tensor): Tensor input for state.
act_input (tf.Tensor): Tensor input for action.
name (str): Inner model name, also the variable scope of the
inner model, if it exists. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
|
_build
|
python
|
rlworkgroup/garage
|
tests/fixtures/models/simple_mlp_merge_model.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_mlp_merge_model.py
|
MIT
|
def _build(self, obs_input, name=None):
"""Build model given input placeholder(s).
Args:
obs_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if it exists. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
del name
return_var = tf.compat.v1.get_variable(
'return_var', (), initializer=tf.constant_initializer(0.5))
return tf.fill((tf.shape(obs_input)[0], self.output_dim), return_var)
|
Build model given input placeholder(s).
Args:
obs_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if it exists. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
|
_build
|
python
|
rlworkgroup/garage
|
tests/fixtures/models/simple_mlp_model.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_mlp_model.py
|
MIT
|
def get_actions(self, observations):
"""Get multiple actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Predicted actions.
dict: Distribution parameters.
"""
n = len(observations)
action, action_info = self.get_action(None)
return [action] * n, action_info
|
Get multiple actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Predicted actions.
dict: Distribution parameters.
|
get_actions
|
python
|
rlworkgroup/garage
|
tests/fixtures/policies/dummy_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/policies/dummy_policy.py
|
MIT
|
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_q_val']
return new_dict
|
Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
|
__getstate__
|
python
|
rlworkgroup/garage
|
tests/fixtures/q_functions/simple_q_function.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/q_functions/simple_q_function.py
|
MIT
|
def ray_local_session_fixture():
"""Initializes Ray and shuts down Ray in local mode.
Yields:
None: Yield is for purposes of pytest module style.
All statements before the yield are part of module setup, and all
statements after the yield are part of module teardown.
"""
if not ray.is_initialized():
ray.init(local_mode=True,
ignore_reinit_error=True,
log_to_driver=False,
include_dashboard=False)
yield
if ray.is_initialized():
ray.shutdown()
|
Initializes Ray and shuts down Ray in local mode.
Yields:
None: Yield is for purposes of pytest module style.
All statements before the yield are part of module setup, and all
statements after the yield are part of module teardown.
|
ray_local_session_fixture
|
python
|
rlworkgroup/garage
|
tests/fixtures/sampler/ray_fixtures.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/sampler/ray_fixtures.py
|
MIT
|
def ray_session_fixture():
"""Initializes Ray and shuts down Ray.
Yields:
None: Yield is for purposes of pytest module style.
All statements before the yield are part of module setup, and all
statements after the yield are part of module teardown.
"""
if not ray.is_initialized():
ray.init(_memory=52428800,
object_store_memory=78643200,
ignore_reinit_error=True,
log_to_driver=False,
include_dashboard=False)
yield
if ray.is_initialized():
ray.shutdown()
|
Initializes Ray and shuts down Ray.
Yields:
None: Yield is for purposes of pytest module style.
    All statements before the yield are a part of module setup, and all
    statements after the yield are a part of module teardown.
|
ray_session_fixture
|
python
|
rlworkgroup/garage
|
tests/fixtures/sampler/ray_fixtures.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/sampler/ray_fixtures.py
|
MIT
|
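Both fixtures above follow pytest's setup/yield/teardown convention. A minimal sketch of how such a generator can be registered and consumed as a pytest fixture, with a hypothetical stand-in instead of Ray so the example stays self-contained:

import pytest


@pytest.fixture
def local_session():
    """Hypothetical fixture mirroring the setup/yield/teardown shape above."""
    session = {'initialized': True}   # setup (ray.init in the real fixture)
    yield session                     # the test body runs here
    session['initialized'] = False    # teardown (ray.shutdown in the real one)


def test_uses_session(local_session):
    assert local_session['initialized']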
def train(self, trainer):
"""Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Trainer is passed to give algorithm
the access to trainer.step_epochs(), which provides services
such as snapshotting and sampler control.
"""
|
Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Trainer is passed to give algorithm
the access to trainer.step_epochs(), which provides services
such as snapshotting and sampler control.
|
train
|
python
|
rlworkgroup/garage
|
tests/fixtures/tf/algos/dummy_off_policy_algo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/tf/algos/dummy_off_policy_algo.py
|
MIT
|
def train_once(self, itr, paths):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
paths (list[dict]): A list of collected paths.
"""
|
Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
paths (list[dict]): A list of collected paths.
|
train_once
|
python
|
rlworkgroup/garage
|
tests/fixtures/tf/algos/dummy_off_policy_algo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/tf/algos/dummy_off_policy_algo.py
|
MIT
|
def optimize_policy(self, samples_data):
"""Optimize the policy using the samples.
Args:
samples_data (dict): Processed sample data.
"""
|
Optimize the policy using the samples.
Args:
samples_data (dict): Processed sample data.
|
optimize_policy
|
python
|
rlworkgroup/garage
|
tests/fixtures/tf/algos/dummy_off_policy_algo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/tf/algos/dummy_off_policy_algo.py
|
MIT
|
def test_tf_make_optimizer_with_type(self):
"""Test make_optimizer function with type as first argument."""
optimizer_type = tf.compat.v1.train.AdamOptimizer
lr = 0.123
optimizer = make_optimizer(optimizer_type,
learning_rate=lr,
name='testOptimizer')
assert isinstance(optimizer, optimizer_type)
self.sess.run(tf.compat.v1.global_variables_initializer())
assert optimizer._name == 'testOptimizer'
assert np.allclose(
optimizer._lr, lr
) # Adam holds the value of learning rate in private variable self._lr
|
Test make_optimizer function with type as first argument.
|
test_tf_make_optimizer_with_type
|
python
|
rlworkgroup/garage
|
tests/garage/test_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/test_functions.py
|
MIT
|
def test_tf_make_optimizer_with_tuple(self):
"""Test make_optimizer function with tuple as first argument."""
lr = 0.123
optimizer_type = (tf.compat.v1.train.AdamOptimizer, {
'learning_rate': lr
})
optimizer = make_optimizer(optimizer_type)
# pylint: disable=isinstance-second-argument-not-valid-type
assert isinstance(optimizer, optimizer_type)
self.sess.run(tf.compat.v1.global_variables_initializer())
assert np.allclose(
optimizer._lr, lr
) # Adam holds the value of learning rate in private variable self._lr
|
Test make_optimizer function with tuple as first argument.
|
test_tf_make_optimizer_with_tuple
|
python
|
rlworkgroup/garage
|
tests/garage/test_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/test_functions.py
|
MIT
|
def test_torch_make_optimizer_with_type(self):
"""Test make_optimizer function with type as first argument."""
optimizer_type = torch.optim.Adam
module = torch.nn.Linear(2, 1)
lr = 0.123
optimizer = make_optimizer(optimizer_type, module=module, lr=lr)
assert isinstance(optimizer, optimizer_type)
assert optimizer.defaults['lr'] == lr
|
Test make_optimizer function with type as first argument.
|
test_torch_make_optimizer_with_type
|
python
|
rlworkgroup/garage
|
tests/garage/test_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/test_functions.py
|
MIT
|
def test_torch_make_optimizer_with_tuple(self):
"""Test make_optimizer function with tuple as first argument."""
optimizer_type = (torch.optim.Adam, {'lr': 0.1})
module = torch.nn.Linear(2, 1)
optimizer = make_optimizer(optimizer_type, module=module)
# pylint: disable=isinstance-second-argument-not-valid-type
assert isinstance(optimizer, optimizer_type)
assert optimizer.defaults['lr'] == optimizer_type[1]['lr']
|
Test make_optimizer function with tuple as first argument.
|
test_torch_make_optimizer_with_tuple
|
python
|
rlworkgroup/garage
|
tests/garage/test_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/test_functions.py
|
MIT
|
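Taken together, the four make_optimizer tests above exercise both calling conventions. A short sketch of the two forms for the PyTorch case, as inferred from those tests and assuming make_optimizer is importable from the top-level garage package:

import torch

from garage import make_optimizer

module = torch.nn.Linear(2, 1)

# Form 1: pass the optimizer type plus keyword arguments.
opt_a = make_optimizer(torch.optim.Adam, module=module, lr=1e-3)

# Form 2: pass a (type, kwargs) tuple, e.g. as loaded from a config file.
opt_b = make_optimizer((torch.optim.Adam, {'lr': 1e-3}), module=module)

assert opt_a.defaults['lr'] == opt_b.defaults['lr'] == 1e-3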
def _init_multi_env_wrapper(self,
env_names,
sample_strategy=uniform_random_strategy):
"""helper function to initialize multi_env_wrapper
Args:
env_names (list(str)): List of Environment names.
sample_strategy (func): A sampling strategy.
Returns:
garage.envs.multi_env_wrapper: Multi env wrapper.
"""
task_envs = [GymEnv(name) for name in env_names]
return MultiEnvWrapper(task_envs, sample_strategy=sample_strategy)
|
Helper function to initialize a multi_env_wrapper.
Args:
env_names (list(str)): List of Environment names.
sample_strategy (func): A sampling strategy.
Returns:
garage.envs.multi_env_wrapper: Multi env wrapper.
|
_init_multi_env_wrapper
|
python
|
rlworkgroup/garage
|
tests/garage/envs/test_multi_env_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/test_multi_env_wrapper.py
|
MIT
|
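The helper above lets each test pick a sampling strategy. In these tests a strategy is just a callable that maps the number of tasks (and the previously active task) to the next task index; a minimal sketch of a hypothetical round-robin strategy, assuming that call signature:

def my_round_robin_strategy(num_tasks, last_task=None):
    """Hypothetical strategy: cycle through tasks 0, 1, ..., num_tasks - 1."""
    if last_task is None:
        return 0
    return (last_task + 1) % num_tasks


assert [my_round_robin_strategy(3, t) for t in (None, 0, 1, 2)] == [0, 1, 2, 0]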
def test_tasks_from_same_env(self):
"""test init with multiple tasks from same env"""
envs = ['CartPole-v0', 'CartPole-v0']
mt_env = self._init_multi_env_wrapper(envs)
assert mt_env.num_tasks == 2
|
test init with multiple tasks from same env
|
test_tasks_from_same_env
|
python
|
rlworkgroup/garage
|
tests/garage/envs/test_multi_env_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/test_multi_env_wrapper.py
|
MIT
|
def test_tasks_from_different_envs(self):
"""test init with multiple tasks from different env"""
envs = ['CartPole-v0', 'CartPole-v1']
mt_env = self._init_multi_env_wrapper(envs)
assert mt_env.num_tasks == 2
|
test init with multiple tasks from different envs
|
test_tasks_from_different_envs
|
python
|
rlworkgroup/garage
|
tests/garage/envs/test_multi_env_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/test_multi_env_wrapper.py
|
MIT
|
def test_raise_exception_when_different_obs_space(self):
"""test if exception is raised when using tasks with different obs space""" # noqa: E501
envs = ['CartPole-v0', 'Blackjack-v0']
with pytest.raises(ValueError):
_ = self._init_multi_env_wrapper(envs)
|
test if exception is raised when using tasks with different obs space
|
test_raise_exception_when_different_obs_space
|
python
|
rlworkgroup/garage
|
tests/garage/envs/test_multi_env_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/test_multi_env_wrapper.py
|
MIT
|
def test_raise_exception_when_different_action_space(self):
"""test if exception is raised when using tasks with different action space""" # noqa: E501
envs = ['LunarLander-v2', 'LunarLanderContinuous-v2']
with pytest.raises(ValueError):
_ = self._init_multi_env_wrapper(envs)
|
test if exception is raised when using tasks with different action space
|
test_raise_exception_when_different_action_space
|
python
|
rlworkgroup/garage
|
tests/garage/envs/test_multi_env_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/test_multi_env_wrapper.py
|
MIT
|
def test_default_active_task_is_none(self):
"""test if default active task is none"""
envs = ['CartPole-v0', 'CartPole-v1']
mt_env = self._init_multi_env_wrapper(
envs, sample_strategy=round_robin_strategy)
assert mt_env._active_task_index is None
|
test if default active task is none
|
test_default_active_task_is_none
|
python
|
rlworkgroup/garage
|
tests/garage/envs/test_multi_env_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/test_multi_env_wrapper.py
|
MIT
|
def test_one_hot_observation_space(self):
"""test one hot representation of observation space"""
envs = ['CartPole-v0', 'CartPole-v1']
mt_env = self._init_multi_env_wrapper(envs)
cartpole = GymEnv('CartPole-v0')
cartpole_lb, cartpole_ub = cartpole.observation_space.bounds
obs_space = akro.Box(np.concatenate([cartpole_lb,
np.zeros(2)]),
np.concatenate([cartpole_ub,
np.ones(2)]))
assert mt_env.observation_space.shape == obs_space.shape
assert (
mt_env.observation_space.bounds[0] == obs_space.bounds[0]).all()
assert (
mt_env.observation_space.bounds[1] == obs_space.bounds[1]).all()
|
test one hot representation of observation space
|
test_one_hot_observation_space
|
python
|
rlworkgroup/garage
|
tests/garage/envs/test_multi_env_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/test_multi_env_wrapper.py
|
MIT
|
def test_task_remains_same_between_multiple_step_calls(self):
"""test if active_task remains same between multiple step calls"""
envs = ['CartPole-v0', 'CartPole-v1']
mt_env = self._init_multi_env_wrapper(
envs, sample_strategy=round_robin_strategy)
mt_env.reset()
tasks = []
for _ in envs:
es = mt_env.step(1)
tasks.append(es.env_info['task_id'])
assert tasks[0] == 0 and tasks[1] == 0
|
test if active_task remains same between multiple step calls
|
test_task_remains_same_between_multiple_step_calls
|
python
|
rlworkgroup/garage
|
tests/garage/envs/test_multi_env_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/test_multi_env_wrapper.py
|
MIT
|
def test_one_hot_observation(self):
"""test if output of step function is correct"""
envs = ['CartPole-v0', 'CartPole-v0']
mt_env = self._init_multi_env_wrapper(
envs, sample_strategy=round_robin_strategy)
obs, _ = mt_env.reset()
assert (obs[-2:] == np.array([1., 0.])).all()
obs = mt_env.step(1).observation
assert (obs[-2:] == np.array([1., 0.])).all()
obs, _ = mt_env.reset()
assert (obs[-2:] == np.array([0., 1.])).all()
obs = mt_env.step(1).observation
assert (obs[-2:] == np.array([0., 1.])).all()
|
test if output of step function is correct
|
test_one_hot_observation
|
python
|
rlworkgroup/garage
|
tests/garage/envs/test_multi_env_wrapper.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/test_multi_env_wrapper.py
|
MIT
|
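The two one-hot tests above rely on the wrapper appending a one-hot task id to every observation. A small numpy sketch of that augmentation, matching the concatenation checked in test_one_hot_observation_space (illustrative helper, not the wrapper's actual code):

import numpy as np


def augment_observation(obs, task_index, num_tasks):
    """Append a one-hot task id to a flat observation (hypothetical helper)."""
    one_hot = np.zeros(num_tasks)
    one_hot[task_index] = 1.0
    return np.concatenate([obs, one_hot])


obs = np.array([0.1, -0.2, 0.0, 0.3])  # e.g. a CartPole observation
print(augment_observation(obs, task_index=0, num_tasks=2))
# -> [ 0.1 -0.2  0.   0.3  1.   0. ]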
def test_pickleable(env_ids):
"""Test Bullet environments are pickle-able"""
for env_id in env_ids:
# extract id string
env_id = env_id.replace('- ', '')
env = BulletEnv(env_id)
round_trip = pickle.loads(pickle.dumps(env))
assert round_trip
env.close()
|
Test Bullet environments are pickle-able
|
test_pickleable
|
python
|
rlworkgroup/garage
|
tests/garage/envs/bullet/test_bullet_env.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/bullet/test_bullet_env.py
|
MIT
|
def test_pickle_creates_new_server(env_ids):
"""Test pickling a Bullet environment creates a new connection.
If all pickling create new connections, no repetition of client id
should be found.
"""
n_env = 4
for env_id in env_ids:
# extract id string
env_id = env_id.replace('- ', '')
bullet_env = BulletEnv(env_id)
envs = [pickle.loads(pickle.dumps(bullet_env)) for _ in range(n_env)]
id_set = set()
if hasattr(bullet_env._env, '_pybullet_client'):
id_set.add(bullet_env._env._pybullet_client._client)
for e in envs:
new_id = e._env._pybullet_client._client
assert new_id not in id_set
id_set.add(new_id)
elif hasattr(bullet_env._env, '_p'):
if isinstance(bullet_env._env._p, BulletClient):
id_set.add(bullet_env._env._p._client)
for e in envs:
new_id = e._env._p._client
assert new_id not in id_set
id_set.add(new_id)
else:
# Some environments have _p as the pybullet module, and they
# don't store client id, so can't check here
pass
for env in envs:
env.close()
|
Test that pickling a Bullet environment creates a new connection.
If every pickling creates a new connection, no client id should be
repeated.
|
test_pickle_creates_new_server
|
python
|
rlworkgroup/garage
|
tests/garage/envs/bullet/test_bullet_env.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/bullet/test_bullet_env.py
|
MIT
|
def test_grayscale_reset(self):
"""
RGB to grayscale conversion using scikit-image.
Weights used for conversion:
Y = 0.2125 R + 0.7154 G + 0.0721 B
Reference:
http://scikit-image.org/docs/dev/api/skimage.color.html#skimage.color.rgb2grey
"""
grayscale_output = np.round(
np.dot(self.env.reset()[:, :, :3],
[0.2125, 0.7154, 0.0721])).astype(np.uint8)
np.testing.assert_array_almost_equal(grayscale_output,
self.env_g.reset())
|
RGB to grayscale conversion using scikit-image.
Weights used for conversion:
Y = 0.2125 R + 0.7154 G + 0.0721 B
Reference:
http://scikit-image.org/docs/dev/api/skimage.color.html#skimage.color.rgb2grey
|
test_grayscale_reset
|
python
|
rlworkgroup/garage
|
tests/garage/envs/wrappers/test_grayscale_env.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/wrappers/test_grayscale_env.py
|
MIT
|
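The weights quoted in the docstring make the expected grayscale output easy to reproduce directly. A short numpy sketch of the same conversion, assuming an RGB frame of shape (H, W, 3) with uint8 channels:

import numpy as np

rgb = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)

# Y = 0.2125 R + 0.7154 G + 0.0721 B, rounded back to uint8 as in the test.
gray = np.round(rgb[:, :, :3].dot([0.2125, 0.7154, 0.0721])).astype(np.uint8)
print(gray.shape)  # (4, 4)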
def test_deterministic_tfp_seed_stream():
"""Test deterministic behavior of TFP SeedStream"""
deterministic.set_seed(0)
with tf.compat.v1.Session() as sess:
rand_tensor = sess.run(
tf.random.uniform((5, 5),
seed=deterministic.get_tf_seed_stream(),
dtype=tf.dtypes.float32))
sess.run(tf.random.uniform((5, 5), dtype=tf.dtypes.float32))
rand_tensor2 = sess.run(
tf.random.uniform((5, 5),
seed=deterministic.get_tf_seed_stream(),
dtype=tf.dtypes.float32))
deterministic_tensor = np.array(
[[0.10550332, 0.14218152, 0.5544759, 0.3720839, 0.6899766],
[0.47086394, 0.5401237, 0.21653509, 0.42823565, 0.6927656],
[0.16598761, 0.48356044, 0.36901915, 0.97140956, 0.07564807],
[0.6694747, 0.21241283, 0.72315156, 0.631876, 0.34476352],
[0.8718543, 0.4879316, 0.76272845, 0.04737151, 0.39661574]],
dtype=np.float32)
deterministic_tensor2 = np.array(
[[0.9950017, 0.52794397, 0.7703887, 0.8688295, 0.78926384],
[0.6301824, 0.45042813, 0.6257613, 0.7717335, 0.8412994],
[0.30846167, 0.71520185, 0.13243473, 0.8455602, 0.01623428],
[0.01353145, 0.23445582, 0.36002636, 0.3576231, 0.61981404],
[0.47964382, 0.55043316, 0.3270856, 0.7003857, 0.53755534]],
dtype=np.float32)
assert np.allclose(rand_tensor, deterministic_tensor)
assert np.allclose(rand_tensor2, deterministic_tensor2)
|
Test deterministic behavior of TFP SeedStream
|
test_deterministic_tfp_seed_stream
|
python
|
rlworkgroup/garage
|
tests/garage/experiment/test_deterministic.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/experiment/test_deterministic.py
|
MIT
|
def setup_method(self):
"""Setup method which is called before every test."""
self.env = normalize(GymEnv('InvertedDoublePendulum-v2'))
self.policy = GaussianMLPPolicy(
env_spec=self.env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
self.value_function = GaussianMLPValueFunction(env_spec=self.env.spec)
deterministic.set_seed(0)
self.sampler = LocalSampler(
agents=self.policy,
envs=self.env,
max_episode_length=self.env.spec.max_episode_length,
is_tf_worker=True)
|
Setup method which is called before every test.
|
setup_method
|
python
|
rlworkgroup/garage
|
tests/garage/experiment/test_trainer.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/experiment/test_trainer.py
|
MIT
|
def test_ddpg_pendulum(self):
"""Test DDPG with Pendulum environment.
This environment has a [-3, 3] action_space bound.
"""
with TFTrainer(snapshot_config, sess=self.sess) as trainer:
env = normalize(
GymEnv('InvertedPendulum-v2', max_episode_length=100))
policy = ContinuousMLPPolicy(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=tf.nn.tanh)
exploration_policy = AddOrnsteinUhlenbeckNoise(env.spec,
policy,
sigma=0.2)
qf = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=tf.nn.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))
sampler = LocalSampler(
agents=exploration_policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True,
worker_class=FragmentWorker)
algo = DDPG(
env_spec=env.spec,
policy=policy,
policy_lr=1e-4,
qf_lr=1e-3,
qf=qf,
replay_buffer=replay_buffer,
sampler=sampler,
steps_per_epoch=20,
target_update_tau=1e-2,
n_train_steps=50,
discount=0.9,
min_buffer_size=int(5e3),
exploration_policy=exploration_policy,
)
trainer.setup(algo, env)
last_avg_ret = trainer.train(n_epochs=10, batch_size=100)
assert last_avg_ret > 10
env.close()
|
Test DDPG with Pendulum environment.
This environment has a [-3, 3] action_space bound.
|
test_ddpg_pendulum
|
python
|
rlworkgroup/garage
|
tests/garage/tf/algos/test_ddpg.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/algos/test_ddpg.py
|
MIT
|
def test_ddpg_pendulum_with_decayed_weights(self):
"""Test DDPG with Pendulum environment and decayed weights.
This environment has a [-3, 3] action_space bound.
"""
with TFTrainer(snapshot_config, sess=self.sess) as trainer:
env = normalize(
GymEnv('InvertedPendulum-v2', max_episode_length=100))
policy = ContinuousMLPPolicy(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=tf.nn.tanh)
exploration_policy = AddOrnsteinUhlenbeckNoise(env.spec,
policy,
sigma=0.2)
qf = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=tf.nn.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))
sampler = LocalSampler(
agents=exploration_policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True,
worker_class=FragmentWorker)
algo = DDPG(
env_spec=env.spec,
policy=policy,
policy_lr=1e-4,
qf_lr=1e-3,
qf=qf,
replay_buffer=replay_buffer,
sampler=sampler,
steps_per_epoch=20,
target_update_tau=1e-2,
n_train_steps=50,
discount=0.9,
policy_weight_decay=0.01,
qf_weight_decay=0.01,
min_buffer_size=int(5e3),
exploration_policy=exploration_policy,
)
trainer.setup(algo, env)
last_avg_ret = trainer.train(n_epochs=10, batch_size=100)
assert last_avg_ret > 10
env.close()
|
Test DDPG with Pendulum environment and decayed weights.
This environment has a [-3, 3] action_space bound.
|
test_ddpg_pendulum_with_decayed_weights
|
python
|
rlworkgroup/garage
|
tests/garage/tf/algos/test_ddpg.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/algos/test_ddpg.py
|
MIT
|
def test_npo_with_unknown_pg_loss(self):
"""Test NPO with unkown pg loss."""
with pytest.raises(ValueError, match='Invalid pg_loss'):
NPO(
env_spec=self.env.spec,
policy=self.policy,
baseline=self.baseline,
sampler=self.sampler,
pg_loss='random pg_loss',
)
|
Test NPO with unknown pg loss.
|
test_npo_with_unknown_pg_loss
|
python
|
rlworkgroup/garage
|
tests/garage/tf/algos/test_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/algos/test_npo.py
|
MIT
|
def test_npo_with_invalid_entropy_method(self):
"""Test NPO with invalid entropy method."""
with pytest.raises(ValueError, match='Invalid entropy_method'):
NPO(
env_spec=self.env.spec,
policy=self.policy,
baseline=self.baseline,
sampler=self.sampler,
entropy_method=None,
)
|
Test NPO with invalid entropy method.
|
test_npo_with_invalid_entropy_method
|
python
|
rlworkgroup/garage
|
tests/garage/tf/algos/test_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/algos/test_npo.py
|
MIT
|
def test_npo_with_max_entropy_and_center_adv(self):
"""Test NPO with max entropy and center_adv."""
with pytest.raises(ValueError):
NPO(
env_spec=self.env.spec,
policy=self.policy,
baseline=self.baseline,
sampler=self.sampler,
entropy_method='max',
center_adv=True,
)
|
Test NPO with max entropy and center_adv.
|
test_npo_with_max_entropy_and_center_adv
|
python
|
rlworkgroup/garage
|
tests/garage/tf/algos/test_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/algos/test_npo.py
|
MIT
|
def test_npo_with_max_entropy_and_no_stop_entropy_gradient(self):
"""Test NPO with max entropy and false stop_entropy_gradient."""
with pytest.raises(ValueError):
NPO(
env_spec=self.env.spec,
policy=self.policy,
baseline=self.baseline,
sampler=self.sampler,
entropy_method='max',
stop_entropy_gradient=False,
)
|
Test NPO with max entropy and false stop_entropy_gradient.
|
test_npo_with_max_entropy_and_no_stop_entropy_gradient
|
python
|
rlworkgroup/garage
|
tests/garage/tf/algos/test_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/algos/test_npo.py
|
MIT
|
def test_npo_with_invalid_no_entropy_configuration(self):
"""Test NPO with invalid no entropy configuration."""
with pytest.raises(ValueError):
NPO(
env_spec=self.env.spec,
policy=self.policy,
baseline=self.baseline,
sampler=self.sampler,
entropy_method='no_entropy',
policy_ent_coeff=0.02,
)
|
Test NPO with invalid no entropy configuration.
|
test_npo_with_invalid_no_entropy_configuration
|
python
|
rlworkgroup/garage
|
tests/garage/tf/algos/test_npo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/algos/test_npo.py
|
MIT
|
def test_ppo_with_maximum_entropy(self):
"""Test PPO with maxium entropy method."""
with TFTrainer(snapshot_config, sess=self.sess) as trainer:
algo = PPO(env_spec=self.env.spec,
policy=self.policy,
baseline=self.baseline,
sampler=self.sampler,
discount=0.99,
lr_clip_range=0.01,
optimizer_args=dict(batch_size=32,
max_optimization_epochs=10),
stop_entropy_gradient=True,
entropy_method='max',
policy_ent_coeff=0.02,
center_adv=False)
trainer.setup(algo, self.env)
last_avg_ret = trainer.train(n_epochs=10, batch_size=2048)
assert last_avg_ret > 35
|
Test PPO with maximum entropy method.
|
test_ppo_with_maximum_entropy
|
python
|
rlworkgroup/garage
|
tests/garage/tf/algos/test_ppo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/algos/test_ppo.py
|
MIT
|
def test_ppo_with_neg_log_likeli_entropy_estimation_and_max(self):
"""
Test PPO with negative log likelihood entropy estimation and max
entropy method.
"""
with TFTrainer(snapshot_config, sess=self.sess) as trainer:
algo = PPO(env_spec=self.env.spec,
policy=self.policy,
baseline=self.baseline,
sampler=self.sampler,
discount=0.99,
lr_clip_range=0.01,
optimizer_args=dict(batch_size=32,
max_optimization_epochs=10),
stop_entropy_gradient=True,
use_neg_logli_entropy=True,
entropy_method='max',
policy_ent_coeff=0.02,
center_adv=False)
trainer.setup(algo, self.env)
last_avg_ret = trainer.train(n_epochs=10, batch_size=2048)
assert last_avg_ret > 35
|
Test PPO with negative log likelihood entropy estimation and max
entropy method.
|
test_ppo_with_neg_log_likeli_entropy_estimation_and_max
|
python
|
rlworkgroup/garage
|
tests/garage/tf/algos/test_ppo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/algos/test_ppo.py
|
MIT
|
def test_ppo_with_neg_log_likeli_entropy_estimation_and_regularized(self):
"""
Test PPO with negative log likelihood entropy estimation and
regularized entropy method.
"""
with TFTrainer(snapshot_config, sess=self.sess) as trainer:
algo = PPO(env_spec=self.env.spec,
policy=self.policy,
baseline=self.baseline,
sampler=self.sampler,
discount=0.99,
lr_clip_range=0.01,
optimizer_args=dict(batch_size=32,
max_optimization_epochs=10),
stop_entropy_gradient=True,
use_neg_logli_entropy=True,
entropy_method='regularized',
policy_ent_coeff=0.0,
center_adv=True)
trainer.setup(algo, self.env)
last_avg_ret = trainer.train(n_epochs=10, batch_size=2048)
assert last_avg_ret > 35
|
Test PPO with negative log likelihood entropy estimation and
regularized entropy method.
|
test_ppo_with_neg_log_likeli_entropy_estimation_and_regularized
|
python
|
rlworkgroup/garage
|
tests/garage/tf/algos/test_ppo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/algos/test_ppo.py
|
MIT
|
def test_ppo_with_regularized_entropy(self):
"""Test PPO with regularized entropy method."""
with TFTrainer(snapshot_config, sess=self.sess) as trainer:
algo = PPO(env_spec=self.env.spec,
policy=self.policy,
baseline=self.baseline,
sampler=self.sampler,
discount=0.99,
lr_clip_range=0.01,
optimizer_args=dict(batch_size=32,
max_optimization_epochs=10),
stop_entropy_gradient=False,
entropy_method='regularized',
policy_ent_coeff=0.02,
center_adv=True)
trainer.setup(algo, self.env)
last_avg_ret = trainer.train(n_epochs=10, batch_size=2048)
assert last_avg_ret > 35
|
Test PPO with regularized entropy method.
|
test_ppo_with_regularized_entropy
|
python
|
rlworkgroup/garage
|
tests/garage/tf/algos/test_ppo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/algos/test_ppo.py
|
MIT
|
def test_ppo_pendulum_recurrent_continuous_baseline(self):
"""Test PPO with Pendulum environment and recurrent policy."""
with TFTrainer(snapshot_config) as trainer:
env = normalize(
GymEnv('InvertedDoublePendulum-v2', max_episode_length=100))
policy = GaussianLSTMPolicy(env_spec=env.spec, )
baseline = ContinuousMLPBaseline(
env_spec=env.spec,
hidden_sizes=(32, 32),
)
sampler = LocalSampler(
agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = PPO(
env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
optimizer_args=dict(
batch_size=32,
max_optimization_epochs=10,
),
stop_entropy_gradient=True,
entropy_method='max',
policy_ent_coeff=0.02,
center_adv=False,
)
trainer.setup(algo, env)
last_avg_ret = trainer.train(n_epochs=10, batch_size=2048)
assert last_avg_ret > 100
env.close()
|
Test PPO with Pendulum environment and recurrent policy.
|
test_ppo_pendulum_recurrent_continuous_baseline
|
python
|
rlworkgroup/garage
|
tests/garage/tf/algos/test_ppo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/algos/test_ppo.py
|
MIT
|
def test_ppo_pendulum_lstm(self):
"""Test PPO with Pendulum environment and recurrent policy."""
with TFTrainer(snapshot_config) as trainer:
env = normalize(
GymEnv('InvertedDoublePendulum-v2', max_episode_length=100))
lstm_policy = GaussianLSTMPolicy(env_spec=env.spec)
baseline = GaussianMLPBaseline(
env_spec=env.spec,
hidden_sizes=(32, 32),
)
sampler = LocalSampler(
agents=lstm_policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = PPO(
env_spec=env.spec,
policy=lstm_policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
optimizer_args=dict(
batch_size=32,
max_optimization_epochs=10,
),
stop_entropy_gradient=True,
entropy_method='max',
policy_ent_coeff=0.02,
center_adv=False,
)
trainer.setup(algo, env)
last_avg_ret = trainer.train(n_epochs=10, batch_size=2048)
assert last_avg_ret > 60
|
Test PPO with Pendulum environment and recurrent policy.
|
test_ppo_pendulum_lstm
|
python
|
rlworkgroup/garage
|
tests/garage/tf/algos/test_ppo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/algos/test_ppo.py
|
MIT
|
def test_ppo_pendulum_gru(self):
"""Test PPO with Pendulum environment and recurrent policy."""
with TFTrainer(snapshot_config) as trainer:
env = normalize(
GymEnv('InvertedDoublePendulum-v2', max_episode_length=100))
gru_policy = GaussianGRUPolicy(env_spec=env.spec)
baseline = GaussianMLPBaseline(
env_spec=env.spec,
hidden_sizes=(32, 32),
)
sampler = LocalSampler(
agents=gru_policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = PPO(
env_spec=env.spec,
policy=gru_policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
optimizer_args=dict(
batch_size=32,
max_optimization_epochs=10,
),
stop_entropy_gradient=True,
entropy_method='max',
policy_ent_coeff=0.02,
center_adv=False,
)
trainer.setup(algo, env)
last_avg_ret = trainer.train(n_epochs=10, batch_size=2048)
assert last_avg_ret > 80
|
Test PPO with Pendulum environment and recurrent policy.
|
test_ppo_pendulum_gru
|
python
|
rlworkgroup/garage
|
tests/garage/tf/algos/test_ppo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/algos/test_ppo.py
|
MIT
|
def test_reps_cartpole(self):
"""Test REPS with gym Cartpole environment."""
with TFTrainer(snapshot_config, sess=self.sess) as trainer:
env = GymEnv('CartPole-v0')
policy = CategoricalMLPPolicy(env_spec=env.spec,
hidden_sizes=[32, 32])
baseline = LinearFeatureBaseline(env_spec=env.spec)
sampler = LocalSampler(
agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = REPS(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99)
trainer.setup(algo, env)
last_avg_ret = trainer.train(n_epochs=10, batch_size=4000)
assert last_avg_ret > 5
env.close()
|
Test REPS with gym Cartpole environment.
|
test_reps_cartpole
|
python
|
rlworkgroup/garage
|
tests/garage/tf/algos/test_reps.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/algos/test_reps.py
|
MIT
|
def circle(r, n):
"""Generate n points on a circle of radius r.
Args:
r (float): Radius of the circle.
n (int): Number of points to generate.
Yields:
tuple(float, float): Coordinate of a point.
"""
for t in np.arange(0, 2 * np.pi, 2 * np.pi / n):
yield r * np.sin(t), r * np.cos(t)
|
Generate n points on a circle of radius r.
Args:
r (float): Radius of the circle.
n (int): Number of points to generate.
Yields:
tuple(float, float): Coordinate of a point.
|
circle
|
python
|
rlworkgroup/garage
|
tests/garage/tf/algos/test_te.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/algos/test_te.py
|
MIT
|
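A quick usage sketch of the circle generator above (assuming it is in scope), showing the four points it yields for r=2 and n=4:

import numpy as np

points = list(circle(2.0, 4))
# t = 0, pi/2, pi, 3*pi/2  ->  roughly (0, 2), (2, 0), (0, -2), (-2, 0)
assert np.allclose(points, [(0, 2), (2, 0), (0, -2), (-2, 0)], atol=1e-8)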
def test_trpo_unknown_kl_constraint(self):
"""Test TRPO with unkown KL constraints."""
with pytest.raises(ValueError, match='Invalid kl_constraint'):
TRPO(
env_spec=self.env.spec,
policy=self.policy,
baseline=self.baseline,
sampler=self.sampler,
discount=0.99,
gae_lambda=0.98,
policy_ent_coeff=0.0,
kl_constraint='random kl_constraint',
)
|
Test TRPO with unknown KL constraints.
|
test_trpo_unknown_kl_constraint
|
python
|
rlworkgroup/garage
|
tests/garage/tf/algos/test_trpo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/algos/test_trpo.py
|
MIT
|
def test_trpo_soft_kl_constraint(self):
"""Test TRPO with unkown KL constraints."""
with TFTrainer(snapshot_config, sess=self.sess) as trainer:
algo = TRPO(env_spec=self.env.spec,
policy=self.policy,
baseline=self.baseline,
sampler=self.sampler,
discount=0.99,
gae_lambda=0.98,
policy_ent_coeff=0.0,
kl_constraint='soft')
trainer.setup(algo, self.env)
last_avg_ret = trainer.train(n_epochs=10, batch_size=2048)
assert last_avg_ret > 45
|
Test TRPO with soft KL constraint.
|
test_trpo_soft_kl_constraint
|
python
|
rlworkgroup/garage
|
tests/garage/tf/algos/test_trpo.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/algos/test_trpo.py
|
MIT
|
def test_cg(self):
"""Solve Ax = b using Conjugate gradient method."""
a = np.linspace(-np.pi, np.pi, 25).reshape((5, 5))
a = a.T.dot(a) # make sure a is positive semi-definite
b = np.linspace(-np.pi, np.pi, 5)
x = _cg(a.dot, b, cg_iters=5)
assert np.allclose(a.dot(x), b)
|
Solve Ax = b using Conjugate gradient method.
|
test_cg
|
python
|
rlworkgroup/garage
|
tests/garage/tf/optimizers/test_conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/optimizers/test_conjugate_gradient_optimizer.py
|
MIT
|
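The test above drives _cg through a callable that applies the matrix A. A minimal sketch of the standard conjugate gradient iteration with that same interface; this is a generic textbook implementation, not garage's _cg:

import numpy as np


def conjugate_gradient(f_Ax, b, cg_iters=10, residual_tol=1e-10):
    """Solve Ax = b for symmetric positive-definite A, given only x -> Ax."""
    x = np.zeros_like(b)
    r = b.copy()  # residual b - Ax for the initial guess x = 0
    p = b.copy()  # initial search direction
    r_dot_r = r.dot(r)
    for _ in range(cg_iters):
        Ap = f_Ax(p)
        alpha = r_dot_r / p.dot(Ap)
        x += alpha * p
        r -= alpha * Ap
        new_r_dot_r = r.dot(r)
        if new_r_dot_r < residual_tol:
            break
        p = r + (new_r_dot_r / r_dot_r) * p
        r_dot_r = new_r_dot_r
    return x


a = np.linspace(-np.pi, np.pi, 25).reshape((5, 5))
a = a.T.dot(a) + 1e-3 * np.eye(5)  # small ridge keeps the matrix positive definite
b = np.linspace(-np.pi, np.pi, 5)
x = conjugate_gradient(a.dot, b, cg_iters=5)
assert np.allclose(a.dot(x), b)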
def test_pearl_mutter_hvp_1x1(self):
"""Test Hessian-vector product for a function with one variable."""
policy = HelperPolicy(n_vars=1)
x = policy.get_params()[0]
a_val = np.array([5.0])
a = tf.constant([0.0])
f = a * (x**2)
expected_hessian = 2 * a_val
vector = np.array([10.0])
expected_hvp = expected_hessian * vector
reg_coeff = 1e-5
hvp = PearlmutterHVP()
self.sess.run(tf.compat.v1.global_variables_initializer())
hvp.update_hvp(f, policy, (a, ), reg_coeff)
hx = hvp.build_eval(np.array([a_val]))
computed_hvp = hx(vector)
assert np.allclose(computed_hvp, expected_hvp)
|
Test Hessian-vector product for a function with one variable.
|
test_pearl_mutter_hvp_1x1
|
python
|
rlworkgroup/garage
|
tests/garage/tf/optimizers/test_conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/optimizers/test_conjugate_gradient_optimizer.py
|
MIT
|
def test_pearl_mutter_hvp_2x2(self, a_val, b_val, x_val, y_val, vector):
"""Test Hessian-vector product for a function with two variables."""
a_val = [a_val]
b_val = [b_val]
vector = np.array([vector], dtype=np.float32)
policy = HelperPolicy(n_vars=2)
params = policy.get_params()
x, y = params[0], params[1]
a = tf.constant(a_val)
b = tf.constant(b_val)
f = a * (x**2) + b * (y**2)
expected_hessian = compute_hessian(f, [x, y])
expected_hvp = tf.matmul(vector, expected_hessian)
reg_coeff = 1e-5
hvp = PearlmutterHVP()
self.sess.run(tf.compat.v1.global_variables_initializer())
self.sess.run(x.assign([x_val]))
self.sess.run(y.assign([y_val]))
hvp.update_hvp(f, policy, (a, b), reg_coeff)
hx = hvp.build_eval((np.array(a_val), np.array(b_val)))
hvp = hx(vector[0])
expected_hvp = expected_hvp.eval()
assert np.allclose(hvp, expected_hvp, atol=1e-6)
|
Test Hessian-vector product for a function with two variables.
|
test_pearl_mutter_hvp_2x2
|
python
|
rlworkgroup/garage
|
tests/garage/tf/optimizers/test_conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/optimizers/test_conjugate_gradient_optimizer.py
|
MIT
|
def test_pearl_mutter_hvp_2x2_non_diagonal(self, a_val, b_val, x_val,
y_val, vector):
"""Test Hessian-vector product for a function with two variables whose Hessian
is non-diagonal.
"""
a_val = [a_val]
b_val = [b_val]
vector = np.array([vector], dtype=np.float32)
policy = HelperPolicy(n_vars=2)
params = policy.get_params()
x, y = params[0], params[1]
a = tf.constant(a_val)
b = tf.constant(b_val)
f = a * (x**3) + b * (y**3) + (x**2) * y + (y**2) * x
expected_hessian = compute_hessian(f, [x, y])
expected_hvp = tf.matmul(vector, expected_hessian)
reg_coeff = 1e-5
hvp = PearlmutterHVP()
self.sess.run(tf.compat.v1.global_variables_initializer())
self.sess.run(x.assign([x_val]))
self.sess.run(y.assign([y_val]))
hvp.update_hvp(f, policy, (a, b), reg_coeff)
hx = hvp.build_eval((np.array(a_val), np.array(b_val)))
hvp = hx(vector[0])
expected_hvp = expected_hvp.eval()
assert np.allclose(hvp, expected_hvp)
|
Test Hessian-vector product for a function with two variables whose Hessian
is non-diagonal.
|
test_pearl_mutter_hvp_2x2_non_diagonal
|
python
|
rlworkgroup/garage
|
tests/garage/tf/optimizers/test_conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/optimizers/test_conjugate_gradient_optimizer.py
|
MIT
|
def test_finite_difference_hvp(self):
"""Test Hessian-vector product for a function with one variable."""
policy = HelperPolicy(n_vars=1)
x = policy.get_params()[0]
a_val = np.array([5.0])
a = tf.constant([0.0])
f = a * (x**2)
expected_hessian = 2 * a_val
vector = np.array([10.0])
expected_hvp = expected_hessian * vector
reg_coeff = 1e-5
hvp = FiniteDifferenceHVP()
self.sess.run(tf.compat.v1.global_variables_initializer())
hvp.update_hvp(f, policy, (a, ), reg_coeff)
hx = hvp.build_eval(np.array([a_val]))
computed_hvp = hx(vector)
assert np.allclose(computed_hvp, expected_hvp)
|
Test Hessian-vector product for a function with one variable.
|
test_finite_difference_hvp
|
python
|
rlworkgroup/garage
|
tests/garage/tf/optimizers/test_conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/optimizers/test_conjugate_gradient_optimizer.py
|
MIT
|
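The finite-difference variant approximates the Hessian-vector product without ever forming the Hessian, using Hv ~= (grad f(theta + eps * v) - grad f(theta)) / eps. A small numpy sketch of that idea on the same f(x) = a * x**2 used in the test above (hypothetical helper, not the garage class):

import numpy as np


def finite_difference_hvp(grad_f, theta, vector, eps=1e-6):
    """Approximate H(theta) @ vector from two gradient evaluations."""
    return (grad_f(theta + eps * vector) - grad_f(theta)) / eps


a = 5.0


def grad_f(x):
    return 2.0 * a * x  # gradient of f(x) = a * x**2


theta = np.array([3.0])
vector = np.array([10.0])

hvp = finite_difference_hvp(grad_f, theta, vector)
assert np.allclose(hvp, 2.0 * a * vector)  # the exact Hessian is the scalar 2a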
def test_finite_difference_hvp_2x2(self, a_val, b_val, x_val, y_val,
vector):
"""Test Hessian-vector product for a function with two variables."""
a_val = [a_val]
b_val = [b_val]
vector = np.array([vector], dtype=np.float32)
policy = HelperPolicy(n_vars=2)
params = policy.get_params()
x, y = params[0], params[1]
a = tf.constant(a_val)
b = tf.constant(b_val)
f = a * (x**2) + b * (y**2)
expected_hessian = compute_hessian(f, [x, y])
expected_hvp = tf.matmul(vector, expected_hessian)
reg_coeff = 1e-8
hvp = FiniteDifferenceHVP(base_eps=1.0)
self.sess.run(tf.compat.v1.global_variables_initializer())
self.sess.run(x.assign([x_val]))
self.sess.run(y.assign([y_val]))
hvp.update_hvp(f, policy, (a, b), reg_coeff)
hx = hvp.build_eval((np.array(a_val), np.array(b_val)))
hvp = hx(vector[0])
expected_hvp = expected_hvp.eval()
assert np.allclose(hvp, expected_hvp)
|
Test Hessian-vector product for a function with two variables.
|
test_finite_difference_hvp_2x2
|
python
|
rlworkgroup/garage
|
tests/garage/tf/optimizers/test_conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/optimizers/test_conjugate_gradient_optimizer.py
|
MIT
|
def test_finite_difference_hvp_2x2_non_diagonal(self, a_val, b_val, x_val,
y_val, vector):
"""Test Hessian-vector product for a function with two variables whose Hessian
is non-diagonal.
"""
a_val = [a_val]
b_val = [b_val]
vector = np.array([vector], dtype=np.float32)
policy = HelperPolicy(n_vars=2)
params = policy.get_params()
x, y = params[0], params[1]
a = tf.constant(a_val)
b = tf.constant(b_val)
f = a * (x**3) + b * (y**3) + (x**2) * y + (y**2) * x
expected_hessian = compute_hessian(f, [x, y])
expected_hvp = tf.matmul(vector, expected_hessian)
reg_coeff = 1e-5
hvp = FiniteDifferenceHVP(base_eps=1)
self.sess.run(tf.compat.v1.global_variables_initializer())
self.sess.run(x.assign([x_val]))
self.sess.run(y.assign([y_val]))
hvp.update_hvp(f, policy, (a, b), reg_coeff)
hx = hvp.build_eval((np.array(a_val), np.array(b_val)))
hvp = hx(vector[0])
expected_hvp = expected_hvp.eval()
assert np.allclose(hvp, expected_hvp)
|
Test Hessian-vector product for a function with two variables whose Hessian
is non-diagonal.
|
test_finite_difference_hvp_2x2_non_diagonal
|
python
|
rlworkgroup/garage
|
tests/garage/tf/optimizers/test_conjugate_gradient_optimizer.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/optimizers/test_conjugate_gradient_optimizer.py
|
MIT
|
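For reference, the Hessian that compute_hessian evaluates numerically in the two non-diagonal tests can also be derived by hand for f = a*x**3 + b*y**3 + x**2*y + y**2*x; a short sketch of the closed form, with the derivation in the comments:

# df/dx    = 3*a*x**2 + 2*x*y + y**2       df/dy    = 3*b*y**2 + x**2 + 2*x*y
# d2f/dx2  = 6*a*x + 2*y                   d2f/dy2  = 6*b*y + 2*x
# d2f/dxdy = d2f/dydx = 2*x + 2*y
import numpy as np


def analytic_hessian(a, b, x, y):
    """Closed-form Hessian of the non-diagonal test function (hand-derived)."""
    return np.array([[6 * a * x + 2 * y, 2 * x + 2 * y],
                     [2 * x + 2 * y, 6 * b * y + 2 * x]])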
def test_does_not_support_dict_obs_space(self, filters, strides, padding,
hidden_sizes):
"""Test that policy raises error if passed a dict obs space."""
env = GymEnv(DummyDictEnv(act_space_type='discrete'))
with pytest.raises(ValueError):
CategoricalCNNPolicy(env_spec=env.spec,
filters=filters,
strides=strides,
padding=padding,
hidden_sizes=hidden_sizes)
|
Test that policy raises error if passed a dict obs space.
|
test_does_not_support_dict_obs_space
|
python
|
rlworkgroup/garage
|
tests/garage/tf/policies/test_categorical_cnn_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/policies/test_categorical_cnn_policy.py
|
MIT
|
def test_does_not_support_dict_obs_space(self):
"""Test that policy raises error if passed a dict obs space."""
env = GymEnv(DummyDictEnv(act_space_type='discrete'))
with pytest.raises(ValueError):
qf = SimpleQFunction(env.spec,
name='does_not_support_dict_obs_space')
DiscreteQFArgmaxPolicy(env_spec=env.spec, qf=qf)
|
Test that policy raises error if passed a dict obs space.
|
test_does_not_support_dict_obs_space
|
python
|
rlworkgroup/garage
|
tests/garage/tf/policies/test_discrete_qf_argmax_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/policies/test_discrete_qf_argmax_policy.py
|
MIT
|
def test_invalid_action_spaces(self):
"""Test that policy raises error if passed a dict obs space."""
env = GymEnv(DummyDictEnv(act_space_type='box'))
with pytest.raises(ValueError):
qf = SimpleQFunction(env.spec)
DiscreteQFArgmaxPolicy(env_spec=env.spec, qf=qf)
|
Test that policy raises error if passed a dict obs space.
|
test_invalid_action_spaces
|
python
|
rlworkgroup/garage
|
tests/garage/tf/policies/test_discrete_qf_argmax_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/policies/test_discrete_qf_argmax_policy.py
|
MIT
|
def test_obs_unflattened(self):
"""Test if a flattened image obs is passed to get_action
then it is unflattened.
"""
obs = self.env.observation_space.sample()
action, _ = self.policy.get_action(
self.env.observation_space.flatten(obs))
self.env.step(action)
|
Test if a flattened image obs is passed to get_action
then it is unflattened.
|
test_obs_unflattened
|
python
|
rlworkgroup/garage
|
tests/garage/tf/policies/test_discrete_qf_argmax_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/tf/policies/test_discrete_qf_argmax_policy.py
|
MIT
|
def test_utils_set_gpu_mode():
"""Test setting gpu mode to False to force CPU."""
if torch.cuda.is_available():
set_gpu_mode(mode=True)
assert global_device() == torch.device('cuda:0')
assert tu._USE_GPU
else:
set_gpu_mode(mode=False)
assert global_device() == torch.device('cpu')
assert not tu._USE_GPU
assert not tu._GPU_ID
|
Test setting gpu mode to False to force CPU.
|
test_utils_set_gpu_mode
|
python
|
rlworkgroup/garage
|
tests/garage/torch/test_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/tests/garage/torch/test_functions.py
|
MIT
|