code (stringlengths 66-870k) | docstring (stringlengths 19-26.7k) | func_name (stringlengths 1-138) | language (stringclasses, 1 value) | repo (stringlengths 7-68) | path (stringlengths 5-324) | url (stringlengths 46-389) | license (stringclasses, 7 values)
---|---|---|---|---|---|---|---|
def te_ppo_mt1_push(ctxt, seed, n_epochs, batch_size_per_task):
"""Train Task Embedding PPO with PointEnv.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
n_epochs (int): Total number of epochs for training.
batch_size_per_task (int): Batch size of samples for each task.
"""
set_seed(seed)
n_tasks = 50
mt1 = metaworld.MT1('push-v1')
task_sampler = MetaWorldTaskSampler(mt1,
'train',
lambda env, _: normalize(env),
add_env_onehot=False)
envs = [env_up() for env_up in task_sampler.sample(n_tasks)]
env = MultiEnvWrapper(envs,
sample_strategy=round_robin_strategy,
mode='vanilla')
latent_length = 2
inference_window = 6
batch_size = batch_size_per_task * n_tasks
policy_ent_coeff = 2e-2
encoder_ent_coeff = 2e-4
inference_ce_coeff = 5e-2
embedding_init_std = 0.1
embedding_max_std = 0.2
embedding_min_std = 1e-6
policy_init_std = 1.0
policy_max_std = None
policy_min_std = None
with TFTrainer(snapshot_config=ctxt) as trainer:
task_embed_spec = TEPPO.get_encoder_spec(env.task_space,
latent_dim=latent_length)
task_encoder = GaussianMLPEncoder(
name='embedding',
embedding_spec=task_embed_spec,
hidden_sizes=(20, 20),
std_share_network=True,
init_std=embedding_init_std,
max_std=embedding_max_std,
output_nonlinearity=tf.nn.tanh,
min_std=embedding_min_std,
)
traj_embed_spec = TEPPO.get_infer_spec(
env.spec,
latent_dim=latent_length,
inference_window_size=inference_window)
inference = GaussianMLPEncoder(
name='inference',
embedding_spec=traj_embed_spec,
hidden_sizes=(20, 10),
std_share_network=True,
init_std=2.0,
output_nonlinearity=tf.nn.tanh,
min_std=embedding_min_std,
)
policy = GaussianMLPTaskEmbeddingPolicy(
name='policy',
env_spec=env.spec,
encoder=task_encoder,
hidden_sizes=(32, 16),
std_share_network=True,
max_std=policy_max_std,
init_std=policy_init_std,
min_std=policy_min_std,
)
baseline = LinearMultiFeatureBaseline(
env_spec=env.spec, features=['observations', 'tasks', 'latents'])
sampler = LocalSampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True,
worker_class=TaskEmbeddingWorker)
algo = TEPPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
inference=inference,
discount=0.99,
lr_clip_range=0.2,
policy_ent_coeff=policy_ent_coeff,
encoder_ent_coeff=encoder_ent_coeff,
inference_ce_coeff=inference_ce_coeff,
use_softplus_entropy=True,
optimizer_args=dict(
batch_size=32,
max_optimization_epochs=10,
learning_rate=1e-3,
),
inference_optimizer_args=dict(
batch_size=32,
max_optimization_epochs=10,
),
center_adv=True,
stop_ce_gradient=True)
trainer.setup(algo, env)
trainer.train(n_epochs=n_epochs, batch_size=batch_size, plot=False)
|
Train Task Embedding PPO with the Metaworld MT1 push environment.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
n_epochs (int): Total number of epochs for training.
batch_size_per_task (int): Batch size of samples for each task.
|
te_ppo_mt1_push
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/te_ppo_metaworld_mt1_push.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/te_ppo_metaworld_mt1_push.py
|
MIT
|
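For context, an experiment function like the one above is not called directly: in the garage example files it is wrapped so that `ctxt` is injected automatically. A minimal launcher sketch, assuming the usual `click` + `wrap_experiment` pattern from garage (the option defaults below are illustrative placeholders, not the repository's values):

```python
import click

from garage import wrap_experiment


@click.command()
@click.option('--seed', default=1)
@click.option('--n_epochs', default=600)
@click.option('--batch_size_per_task', default=1024)
@wrap_experiment
def te_ppo_mt1_push(ctxt, seed, n_epochs, batch_size_per_task):
    # Body as in the row above; `ctxt` is injected by wrap_experiment,
    # the remaining arguments come from the click options.
    ...


if __name__ == '__main__':
    # pylint: disable=no-value-for-parameter
    te_ppo_mt1_push()
```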
def te_ppo_mt50(ctxt, seed, n_epochs, batch_size_per_task, n_tasks):
"""Train Task Embedding PPO with PointEnv.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
n_epochs (int): Total number of epochs for training.
batch_size_per_task (int): Batch size of samples for each task.
n_tasks (int): Number of tasks to use. Should be a multiple of 50.
"""
set_seed(seed)
mt50 = metaworld.MT50()
task_sampler = MetaWorldTaskSampler(mt50,
'train',
lambda env, _: normalize(env),
add_env_onehot=False)
assert n_tasks % 50 == 0
assert n_tasks <= 2500
envs = [env_up() for env_up in task_sampler.sample(n_tasks)]
env = MultiEnvWrapper(envs,
sample_strategy=round_robin_strategy,
mode='vanilla')
latent_length = 6
inference_window = 6
batch_size = batch_size_per_task * n_tasks
policy_ent_coeff = 2e-2
encoder_ent_coeff = 2e-4
inference_ce_coeff = 5e-2
embedding_init_std = 0.1
embedding_max_std = 0.2
embedding_min_std = 1e-6
policy_init_std = 1.0
policy_max_std = None
policy_min_std = None
with TFTrainer(snapshot_config=ctxt) as trainer:
task_embed_spec = TEPPO.get_encoder_spec(env.task_space,
latent_dim=latent_length)
task_encoder = GaussianMLPEncoder(
name='embedding',
embedding_spec=task_embed_spec,
hidden_sizes=(20, 20),
std_share_network=True,
init_std=embedding_init_std,
max_std=embedding_max_std,
output_nonlinearity=tf.nn.tanh,
min_std=embedding_min_std,
)
traj_embed_spec = TEPPO.get_infer_spec(
env.spec,
latent_dim=latent_length,
inference_window_size=inference_window)
inference = GaussianMLPEncoder(
name='inference',
embedding_spec=traj_embed_spec,
hidden_sizes=(20, 10),
std_share_network=True,
init_std=2.0,
output_nonlinearity=tf.nn.tanh,
min_std=embedding_min_std,
)
policy = GaussianMLPTaskEmbeddingPolicy(
name='policy',
env_spec=env.spec,
encoder=task_encoder,
hidden_sizes=(32, 16),
std_share_network=True,
max_std=policy_max_std,
init_std=policy_init_std,
min_std=policy_min_std,
)
baseline = LinearMultiFeatureBaseline(
env_spec=env.spec, features=['observations', 'tasks', 'latents'])
sampler = LocalSampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True,
worker_class=TaskEmbeddingWorker)
algo = TEPPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
inference=inference,
discount=0.99,
lr_clip_range=0.2,
policy_ent_coeff=policy_ent_coeff,
encoder_ent_coeff=encoder_ent_coeff,
inference_ce_coeff=inference_ce_coeff,
use_softplus_entropy=True,
optimizer_args=dict(
batch_size=32,
max_optimization_epochs=10,
learning_rate=1e-3,
),
inference_optimizer_args=dict(
batch_size=32,
max_optimization_epochs=10,
),
center_adv=True,
stop_ce_gradient=True)
trainer.setup(algo, env)
trainer.train(n_epochs=n_epochs, batch_size=batch_size, plot=False)
|
Train Task Embedding PPO with the Metaworld MT50 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
n_epochs (int): Total number of epochs for training.
batch_size_per_task (int): Batch size of samples for each task.
n_tasks (int): Number of tasks to use. Should be a multiple of 50.
|
te_ppo_mt50
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/te_ppo_metaworld_mt50.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/te_ppo_metaworld_mt50.py
|
MIT
|
def circle(r, n):
"""Generate n points on a circle of radius r.
Args:
r (float): Radius of the circle.
n (int): Number of points to generate.
Yields:
tuple(float, float): Coordinate of a point.
"""
for t in np.arange(0, 2 * np.pi, 2 * np.pi / n):
yield r * np.sin(t), r * np.cos(t)
|
Generate n points on a circle of radius r.
Args:
r (float): Radius of the circle.
n (int): Number of points to generate.
Yields:
tuple(float, float): Coordinate of a point.
|
circle
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/te_ppo_point.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/te_ppo_point.py
|
MIT
|
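The `circle` generator above supplies goal locations for the multi-task PointEnv experiment in the next row (`tasks = TASKS`). A minimal sketch of how such a `TASKS` dict could be built from it, assuming only that `PointEnv` accepts a `goal` keyword (as in the `bc_point` example later in this dataset); the radius, task count, and any extra kwargs are illustrative:

```python
# Hypothetical TASKS construction; keys become task names, and the
# args/kwargs of each entry are forwarded to PointEnv(*args, **kwargs).
TASKS = {
    str(i + 1): {
        'args': [],
        'kwargs': {
            'goal': g,
        },
    }
    for i, g in enumerate(circle(3.0, 4))
}
```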
def te_ppo_pointenv(ctxt, seed, n_epochs, batch_size_per_task):
"""Train Task Embedding PPO with PointEnv.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
n_epochs (int): Total number of epochs for training.
batch_size_per_task (int): Batch size of samples for each task.
"""
set_seed(seed)
tasks = TASKS
latent_length = 2
inference_window = 6
batch_size = batch_size_per_task * len(TASKS)
policy_ent_coeff = 1e-3
encoder_ent_coeff = 1e-3
inference_ce_coeff = 5e-2
embedding_init_std = 0.1
embedding_max_std = 0.2
embedding_min_std = 1e-6
policy_init_std = 1.0
policy_max_std = 2.0
policy_min_std = None
task_names = sorted(tasks.keys())
task_args = [tasks[t]['args'] for t in task_names]
task_kwargs = [tasks[t]['kwargs'] for t in task_names]
with TFTrainer(snapshot_config=ctxt) as trainer:
task_envs = [
PointEnv(*t_args, **t_kwargs, max_episode_length=100)
for t_args, t_kwargs in zip(task_args, task_kwargs)
]
env = MultiEnvWrapper(task_envs, round_robin_strategy, mode='vanilla')
task_embed_spec = TEPPO.get_encoder_spec(env.task_space,
latent_dim=latent_length)
task_encoder = GaussianMLPEncoder(
name='embedding',
embedding_spec=task_embed_spec,
hidden_sizes=(20, 20),
std_share_network=True,
init_std=embedding_init_std,
max_std=embedding_max_std,
output_nonlinearity=tf.nn.tanh,
std_output_nonlinearity=tf.nn.tanh,
min_std=embedding_min_std,
)
traj_embed_spec = TEPPO.get_infer_spec(
env.spec,
latent_dim=latent_length,
inference_window_size=inference_window)
inference = GaussianMLPEncoder(
name='inference',
embedding_spec=traj_embed_spec,
hidden_sizes=(20, 20),
std_share_network=True,
init_std=0.1,
output_nonlinearity=tf.nn.tanh,
std_output_nonlinearity=tf.nn.tanh,
min_std=embedding_min_std,
)
policy = GaussianMLPTaskEmbeddingPolicy(
name='policy',
env_spec=env.spec,
encoder=task_encoder,
hidden_sizes=(32, 16),
std_share_network=True,
max_std=policy_max_std,
init_std=policy_init_std,
min_std=policy_min_std,
)
baseline = LinearMultiFeatureBaseline(
env_spec=env.spec, features=['observations', 'tasks', 'latents'])
sampler = LocalSampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True,
worker_class=TaskEmbeddingWorker)
algo = TEPPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
inference=inference,
discount=0.99,
lr_clip_range=0.2,
policy_ent_coeff=policy_ent_coeff,
encoder_ent_coeff=encoder_ent_coeff,
inference_ce_coeff=inference_ce_coeff,
use_softplus_entropy=True,
optimizer_args=dict(
batch_size=32,
max_optimization_epochs=10,
learning_rate=1e-3,
),
inference_optimizer_args=dict(
batch_size=32,
max_optimization_epochs=10,
learning_rate=1e-3,
),
center_adv=True,
stop_ce_gradient=True)
trainer.setup(algo, env)
trainer.train(n_epochs=n_epochs, batch_size=batch_size, plot=False)
|
Train Task Embedding PPO with PointEnv.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
n_epochs (int): Total number of epochs for training.
batch_size_per_task (int): Batch size of samples for each task.
|
te_ppo_pointenv
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/te_ppo_point.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/te_ppo_point.py
|
MIT
|
def trpo_cartpole(ctxt=None, seed=1):
"""Train TRPO with CartPole-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with TFTrainer(ctxt) as trainer:
env = GymEnv('CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
sampler = LocalSampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
max_kl_step=0.01)
trainer.setup(algo, env)
trainer.train(n_epochs=100, batch_size=4000)
|
Train TRPO with CartPole-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
trpo_cartpole
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/trpo_cartpole.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/trpo_cartpole.py
|
MIT
|
def trpo_cartpole_bullet(ctxt=None, seed=1):
"""Train TRPO with Pybullet's CartPoleBulletEnv environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with TFTrainer(ctxt) as trainer:
env = BulletEnv(
gym.make('CartPoleBulletEnv-v1',
renders=False,
discrete_actions=True))
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
max_kl_step=0.01)
trainer.setup(algo, env)
trainer.train(n_epochs=100, batch_size=4000)
|
Train TRPO with Pybullet's CartPoleBulletEnv environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
trpo_cartpole_bullet
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/trpo_cartpole_bullet.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/trpo_cartpole_bullet.py
|
MIT
|
def trpo_cartpole_recurrent(ctxt, seed, n_epochs, batch_size, plot):
"""Train TRPO with a recurrent policy on CartPole.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
n_epochs (int): Number of epochs for training.
seed (int): Used to seed the random number generator to produce
determinism.
batch_size (int): Batch size used for training.
plot (bool): Whether to plot or not.
"""
set_seed(seed)
with TFTrainer(snapshot_config=ctxt) as trainer:
env = GymEnv('CartPole-v1', max_episode_length=100)
policy = CategoricalLSTMPolicy(name='policy', env_spec=env.spec)
baseline = LinearFeatureBaseline(env_spec=env.spec)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
max_kl_step=0.01,
optimizer=ConjugateGradientOptimizer,
optimizer_args=dict(hvp_approach=FiniteDifferenceHVP(
base_eps=1e-5)))
trainer.setup(algo, env)
trainer.train(n_epochs=n_epochs, batch_size=batch_size, plot=plot)
|
Train TRPO with a recurrent policy on CartPole.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
n_epochs (int): Number of epochs for training.
seed (int): Used to seed the random number generator to produce
determinism.
batch_size (int): Batch size used for training.
plot (bool): Whether to plot or not.
|
trpo_cartpole_recurrent
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/trpo_cartpole_recurrent.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/trpo_cartpole_recurrent.py
|
MIT
|
def trpo_cubecrash(ctxt=None, seed=1, max_episode_length=5, batch_size=4000):
"""Train TRPO with CubeCrash-v0 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
max_episode_length (int): Maximum length of a single episode.
batch_size (int): Number of timesteps to use in each training step.
"""
set_seed(seed)
with TFTrainer(ctxt) as trainer:
env = normalize(
GymEnv('CubeCrash-v0', max_episode_length=max_episode_length))
policy = CategoricalCNNPolicy(env_spec=env.spec,
filters=((32, (8, 8)), (64, (4, 4))),
strides=(4, 2),
padding='VALID',
hidden_sizes=(32, 32))
baseline = GaussianCNNBaseline(env_spec=env.spec,
filters=((32, (8, 8)), (64, (4, 4))),
strides=(4, 2),
padding='VALID',
hidden_sizes=(32, 32),
use_trust_region=True)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
policy_ent_coeff=0.0)
trainer.setup(algo, env)
trainer.train(n_epochs=100, batch_size=batch_size)
|
Train TRPO with CubeCrash-v0 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
max_episode_length (int): Maximum length of a single episode.
batch_size (int): Number of timesteps to use in each training step.
|
trpo_cubecrash
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/trpo_cubecrash.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/trpo_cubecrash.py
|
MIT
|
def trpo_gym_tf_cartpole(ctxt=None, seed=1):
"""Train TRPO with CartPole-v0 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with TFTrainer(snapshot_config=ctxt) as trainer:
env = GymEnv('CartPole-v0')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = TRPO(
env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
max_kl_step=0.01,
)
trainer.setup(algo, env)
trainer.train(n_epochs=120, batch_size=4000)
|
Train TRPO with CartPole-v0 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
trpo_gym_tf_cartpole
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/trpo_gym_tf_cartpole.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/trpo_gym_tf_cartpole.py
|
MIT
|
def trpo_gym_tf_cartpole(ctxt=None, seed=1):
"""Train TRPO with CartPole-v0 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with TFTrainer(snapshot_config=ctxt) as trainer:
env = GymEnv('CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = TRPO(
env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
max_kl_step=0.01,
)
trainer.setup(algo, env)
trainer.train(n_epochs=10, batch_size=10000, plot=False)
|
Train TRPO with CartPole-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
trpo_gym_tf_cartpole
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/trpo_gym_tf_cartpole_pretrained.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/trpo_gym_tf_cartpole_pretrained.py
|
MIT
|
def pre_trained_trpo_cartpole(
ctxt=None,
snapshot_dir='data/local/experiment/trpo_gym_tf_cartpole',
seed=1):
"""Use pre-trained TRPO and reusume experiment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
snapshot_dir (str): Directory containing the snapshot to resume from.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with TFTrainer(snapshot_config=ctxt) as trainer:
trainer.restore(snapshot_dir)
trainer.resume(n_epochs=30, batch_size=8000)
|
Resume training from a pre-trained TRPO experiment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
snapshot_dir (str): Directory containing the snapshot to resume from.
seed (int): Used to seed the random number generator to produce
determinism.
|
pre_trained_trpo_cartpole
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/trpo_gym_tf_cartpole_pretrained.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/trpo_gym_tf_cartpole_pretrained.py
|
MIT
|
def trpo_swimmer(ctxt=None, seed=1, batch_size=4000):
"""Train TRPO with Swimmer-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
batch_size (int): Number of timesteps to use in each training step.
"""
set_seed(seed)
with TFTrainer(ctxt) as trainer:
env = GymEnv('Swimmer-v2')
policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
max_kl_step=0.01)
trainer.setup(algo, env)
trainer.train(n_epochs=40, batch_size=batch_size)
|
Train TRPO with Swimmer-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
batch_size (int): Number of timesteps to use in each training step.
|
trpo_swimmer
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/trpo_swimmer.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/trpo_swimmer.py
|
MIT
|
def trpo_swimmer_ray_sampler(ctxt=None, seed=1):
"""tf_trpo_swimmer.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
# Since this is an example, we are running ray in a reduced state.
# One can comment this call out in order to run Ray at full capacity.
ray.init(_memory=52428800,
object_store_memory=78643200,
ignore_reinit_error=True,
log_to_driver=False,
include_dashboard=False)
with TFTrainer(snapshot_config=ctxt) as trainer:
set_seed(seed)
env = GymEnv('Swimmer-v2')
policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
max_kl_step=0.01)
trainer.setup(algo, env)
trainer.train(n_epochs=40, batch_size=4000)
|
Train TRPO with the Swimmer-v2 environment using RaySampler.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
trpo_swimmer_ray_sampler
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/trpo_swimmer_ray_sampler.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/trpo_swimmer_ray_sampler.py
|
MIT
|
def init_opt(self):
"""Initialize optimizer and build computation graph."""
observation_dim = self.policy.observation_space.flat_dim
action_dim = self.policy.action_space.flat_dim
with tf.name_scope('inputs'):
self._observation = tf.compat.v1.placeholder(
tf.float32, shape=[None, observation_dim], name='observation')
self._action = tf.compat.v1.placeholder(tf.float32,
shape=[None, action_dim],
name='action')
self._returns = tf.compat.v1.placeholder(tf.float32,
shape=[None],
name='return')
policy_dist = self.policy.build(self._observation, name='policy').dist
with tf.name_scope('loss'):
ll = policy_dist.log_prob(self._action, name='log_likelihood')
loss = -tf.reduce_mean(ll * self._returns)
with tf.name_scope('train'):
self._train_op = tf.compat.v1.train.AdamOptimizer(1e-3).minimize(
loss)
|
Initialize optimizer and build computation graph.
|
init_opt
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/tutorial_vpg.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/tutorial_vpg.py
|
MIT
|
def train(self, trainer):
"""Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer.
"""
for epoch in trainer.step_epochs():
samples = trainer.obtain_samples(epoch)
log_performance(epoch,
EpisodeBatch.from_list(self.env_spec, samples),
self._discount)
self._train_once(samples)
|
Obtain samples and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer.
|
train
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/tutorial_vpg.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/tutorial_vpg.py
|
MIT
|
def _train_once(self, samples):
"""Perform one step of policy optimization given one batch of samples.
Args:
samples (list[dict]): A list of collected samples.
Returns:
numpy.float64: Average return.
"""
obs = np.concatenate([path['observations'] for path in samples])
actions = np.concatenate([path['actions'] for path in samples])
returns = []
for path in samples:
returns.append(discount_cumsum(path['rewards'], self._discount))
returns = np.concatenate(returns)
sess = tf.compat.v1.get_default_session()
sess.run(self._train_op,
feed_dict={
self._observation: obs,
self._action: actions,
self._returns: returns,
})
return np.mean(returns)
|
Perform one step of policy optimization given one batch of samples.
Args:
samples (list[dict]): A list of collected samples.
Returns:
numpy.float64: Average return.
|
_train_once
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/tutorial_vpg.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/tutorial_vpg.py
|
MIT
|
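`_train_once` above relies on `discount_cumsum` (imported near the top of the full tutorial file) to turn per-step rewards into discounted returns. A minimal NumPy reference of that computation, assuming the standard recurrence `out[t] = r[t] + gamma * out[t + 1]`:

```python
import numpy as np


def discount_cumsum_ref(rewards, gamma):
    """Reference discounted cumulative sum: out[t] = r[t] + gamma * out[t+1]."""
    out = np.zeros(len(rewards))
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        out[t] = running
    return out


# Example: rewards [1, 1, 1] with gamma = 0.9 -> [2.71, 1.9, 1.0]
print(discount_cumsum_ref(np.array([1.0, 1.0, 1.0]), 0.9))
```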
def __getstate__(self):
"""Parameters to save in snapshot.
Returns:
dict: Parameters to save.
"""
data = self.__dict__.copy()
del data['_observation']
del data['_action']
del data['_returns']
del data['_train_op']
return data
|
Parameters to save in snapshot.
Returns:
dict: Parameters to save.
|
__getstate__
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/tutorial_vpg.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/tutorial_vpg.py
|
MIT
|
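`__getstate__` above drops the TensorFlow placeholders and the train op because they cannot be pickled. On restore, the algorithm has to rebuild them; a plausible sketch of the matching `__setstate__` method on the same class, assuming `init_opt` (shown earlier) reconstructs the graph:

```python
def __setstate__(self, state):
    """Restore the snapshot and rebuild the pieces removed in __getstate__."""
    self.__dict__ = state
    # Placeholders and the train op were deleted before pickling,
    # so rebuild the computation graph here.
    self.init_opt()
```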
def tutorial_vpg(ctxt=None):
"""Train VPG with PointEnv environment.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
"""
set_seed(100)
with TFTrainer(ctxt) as trainer:
env = PointEnv(max_episode_length=200)
policy = GaussianMLPPolicy(env.spec)
sampler = LocalSampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = SimpleVPG(env.spec, policy, sampler)
trainer.setup(algo, env)
trainer.train(n_epochs=200, batch_size=4000)
|
Train VPG with PointEnv environment.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
|
tutorial_vpg
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/tutorial_vpg.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/tutorial_vpg.py
|
MIT
|
def vpg_cartpole(ctxt=None, seed=1):
"""Train VPG with CartPole-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with TFTrainer(snapshot_config=ctxt) as trainer:
env = GymEnv('CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = VPG(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99,
optimizer_args=dict(learning_rate=0.01, ))
trainer.setup(algo, env)
trainer.train(n_epochs=100, batch_size=10000)
|
Train VPG with CartPole-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
vpg_cartpole
|
python
|
rlworkgroup/garage
|
src/garage/examples/tf/vpg_cartpole.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/tf/vpg_cartpole.py
|
MIT
|
def get_actions(self, observations):
"""Get actions given observations.
Args:
observations (np.ndarray): Observations from the environment.
Has shape :math:`(B, O)`, where :math:`B` is the batch
dimension and :math:`O` is the observation dimensionality (at
least 2).
Returns:
tuple:
* np.ndarray: Batch of optimal actions.
Has shape :math:`(B, 2)`, where :math:`B` is the batch
dimension.
Optimal action in the environment.
* dict[str, np.ndarray]: Agent info (empty).
"""
return (self.goal[np.newaxis, :].repeat(len(observations), axis=0) -
observations[:, :2]), {}
|
Get actions given observations.
Args:
observations (np.ndarray): Observations from the environment.
Has shape :math:`(B, O)`, where :math:`B` is the batch
dimension and :math:`O` is the observation dimensionality (at
least 2).
Returns:
tuple:
* np.ndarray: Batch of optimal actions.
Has shape :math:`(B, 2)`, where :math:`B` is the batch
dimension.
Optimal action in the environment.
* dict[str, np.ndarray]: Agent info (empty).
|
get_actions
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/bc_point.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/bc_point.py
|
MIT
|
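As a quick sanity check of the rule above (the optimal action is simply `goal` minus the first two observation coordinates), here is a tiny hypothetical batch; the observation layout beyond the first two entries is illustrative:

```python
import numpy as np

goal = np.array([1.0, 1.0])
# Two made-up observations whose first two entries are the (x, y) position.
observations = np.array([[0.0, 0.0, 0.0, 0.0],
                         [0.5, 1.0, 0.0, 0.0]])
actions = (goal[np.newaxis, :].repeat(len(observations), axis=0)
           - observations[:, :2])
print(actions)  # [[1.  1. ]
                #  [0.5 0. ]]
```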
def bc_point(ctxt=None, loss='log_prob'):
"""Run Behavioral Cloning on garage.envs.PointEnv.
Args:
ctxt (ExperimentContext): Provided by wrap_experiment.
loss (str): Either 'log_prob' or 'mse'
"""
trainer = Trainer(ctxt)
goal = np.array([1., 1.])
env = PointEnv(goal=goal, max_episode_length=200)
expert = OptimalPolicy(env.spec, goal=goal)
policy = GaussianMLPPolicy(env.spec, [8, 8])
batch_size = 1000
sampler = RaySampler(agents=expert,
envs=env,
max_episode_length=env.spec.max_episode_length)
algo = BC(env.spec,
policy,
batch_size=batch_size,
source=expert,
sampler=sampler,
policy_lr=1e-2,
loss=loss)
trainer.setup(algo, env)
trainer.train(100, batch_size=batch_size)
|
Run Behavioral Cloning on garage.envs.PointEnv.
Args:
ctxt (ExperimentContext): Provided by wrap_experiment.
loss (str): Either 'log_prob' or 'mse'
|
bc_point
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/bc_point.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/bc_point.py
|
MIT
|
def get_actions(self, observations):
"""Get actions given observations.
Args:
observations (np.ndarray): Observations from the environment.
Has shape :math:`(B, O)`, where :math:`B` is the batch
dimension and :math:`O` is the observation dimensionality (at
least 2).
Returns:
tuple:
* np.ndarray: Batch of optimal actions.
Has shape :math:`(B, 2)`, where :math:`B` is the batch
dimension.
Optimal action in the environment.
* dict[str, np.ndarray]: Agent info (empty).
"""
return (self.goal[np.newaxis, :].repeat(len(observations), axis=0) -
observations[:, :2]), {}
|
Get actions given observations.
Args:
observations (np.ndarray): Observations from the environment.
Has shape :math:`(B, O)`, where :math:`B` is the batch
dimension and :math:`O` is the observation dimensionality (at
least 2).
Returns:
tuple:
* np.ndarray: Batch of optimal actions.
Has shape :math:`(B, 2)`, where :math:`B` is the batch
dimension.
Optimal action in the environment.
* dict[str, np.ndarray]: Agent info (empty).
|
get_actions
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/bc_point_deterministic_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/bc_point_deterministic_policy.py
|
MIT
|
def bc_point(ctxt=None):
"""Run Behavioral Cloning on garage.envs.PointEnv.
Args:
ctxt (ExperimentContext): Provided by wrap_experiment.
"""
trainer = Trainer(ctxt)
goal = np.array([1., 1.])
env = PointEnv(goal=goal, max_episode_length=200)
expert = OptimalPolicy(env.spec, goal=goal)
policy = DeterministicMLPPolicy(env.spec, hidden_sizes=[8, 8])
batch_size = 1000
sampler = RaySampler(agents=expert,
envs=env,
max_episode_length=env.spec.max_episode_length)
algo = BC(env.spec,
policy,
batch_size=batch_size,
source=expert,
sampler=sampler,
policy_lr=1e-2,
loss='mse')
trainer.setup(algo, env)
trainer.train(100, batch_size=batch_size)
|
Run Behavioral Cloning on garage.envs.PointEnv.
Args:
ctxt (ExperimentContext): Provided by wrap_experiment.
|
bc_point
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/bc_point_deterministic_policy.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/bc_point_deterministic_policy.py
|
MIT
|
def ddpg_pendulum(ctxt=None, seed=1, lr=1e-4):
"""Train DDPG with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
lr (float): Learning rate for policy optimization.
"""
set_seed(seed)
trainer = Trainer(ctxt)
env = normalize(GymEnv('InvertedDoublePendulum-v2'))
policy = DeterministicMLPPolicy(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=F.relu,
output_nonlinearity=torch.tanh)
exploration_policy = AddOrnsteinUhlenbeckNoise(env.spec, policy, sigma=0.2)
qf = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=F.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))
policy_optimizer = (torch.optim.Adagrad, {'lr': lr, 'lr_decay': 0.99})
sampler = LocalSampler(agents=exploration_policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
worker_class=FragmentWorker)
ddpg = DDPG(env_spec=env.spec,
policy=policy,
qf=qf,
replay_buffer=replay_buffer,
sampler=sampler,
steps_per_epoch=20,
n_train_steps=50,
min_buffer_size=int(1e4),
exploration_policy=exploration_policy,
target_update_tau=1e-2,
discount=0.9,
policy_optimizer=policy_optimizer,
qf_optimizer=torch.optim.Adam)
trainer.setup(algo=ddpg, env=env)
trainer.train(n_epochs=500, batch_size=100)
|
Train DDPG with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
lr (float): Learning rate for policy optimization.
|
ddpg_pendulum
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/ddpg_pendulum.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/ddpg_pendulum.py
|
MIT
|
def dqn_cartpole(ctxt=None, seed=24):
"""Train DQN with CartPole-v0 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
runner = Trainer(ctxt)
n_epochs = 100
steps_per_epoch = 10
sampler_batch_size = 512
num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size
env = GymEnv('CartPole-v0')
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))
qf = DiscreteMLPQFunction(env_spec=env.spec, hidden_sizes=(8, 5))
policy = DiscreteQFArgmaxPolicy(env_spec=env.spec, qf=qf)
exploration_policy = EpsilonGreedyPolicy(env_spec=env.spec,
policy=policy,
total_timesteps=num_timesteps,
max_epsilon=1.0,
min_epsilon=0.01,
decay_ratio=0.4)
sampler = LocalSampler(agents=exploration_policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
worker_class=FragmentWorker)
algo = DQN(env_spec=env.spec,
policy=policy,
qf=qf,
exploration_policy=exploration_policy,
replay_buffer=replay_buffer,
sampler=sampler,
steps_per_epoch=steps_per_epoch,
qf_lr=5e-5,
discount=0.9,
min_buffer_size=int(1e4),
n_train_steps=500,
target_update_freq=30,
buffer_batch_size=64)
runner.setup(algo, env)
runner.train(n_epochs=n_epochs, batch_size=sampler_batch_size)
env.close()
|
Train DQN with CartPole-v0 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
dqn_cartpole
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/dqn_cartpole.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/dqn_cartpole.py
|
MIT
|
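The exploration schedule in `dqn_cartpole` above comes down to simple arithmetic: `num_timesteps` bounds the whole run, and, assuming `decay_ratio` is the fraction of `total_timesteps` over which epsilon is annealed from `max_epsilon` to `min_epsilon`, the numbers work out as in this illustrative check (not output from the example itself):

```python
# Worked numbers for the epsilon-greedy schedule used above.
n_epochs, steps_per_epoch, sampler_batch_size = 100, 10, 512
num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size
decay_steps = int(num_timesteps * 0.4)  # decay_ratio = 0.4
print(num_timesteps, decay_steps)  # 512000 204800
```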
def maml_ppo_half_cheetah_dir(ctxt, seed, epochs, episodes_per_task,
meta_batch_size):
"""Set up environment and algorithm and run the task.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
episodes_per_task (int): Number of episodes per epoch per task
for training.
meta_batch_size (int): Number of tasks sampled per batch.
"""
set_seed(seed)
max_episode_length = 100
env = normalize(GymEnv(HalfCheetahDirEnv(),
max_episode_length=max_episode_length),
expected_action_scale=10.)
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
task_sampler = SetTaskSampler(
HalfCheetahDirEnv,
wrapper=lambda env, _: normalize(GymEnv(
env, max_episode_length=max_episode_length),
expected_action_scale=10.))
meta_evaluator = MetaEvaluator(test_task_sampler=task_sampler,
n_test_tasks=2,
n_test_episodes=10)
trainer = Trainer(ctxt)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length)
algo = MAMLPPO(env=env,
policy=policy,
sampler=sampler,
task_sampler=task_sampler,
value_function=value_function,
meta_batch_size=meta_batch_size,
discount=0.99,
gae_lambda=1.,
inner_lr=0.1,
num_grad_updates=1,
meta_evaluator=meta_evaluator)
trainer.setup(algo, env)
trainer.train(n_epochs=epochs,
batch_size=episodes_per_task * env.spec.max_episode_length)
|
Set up environment and algorithm and run the task.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
episodes_per_task (int): Number of episodes per epoch per task
for training.
meta_batch_size (int): Number of tasks sampled per batch.
|
maml_ppo_half_cheetah_dir
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/maml_ppo_half_cheetah_dir.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/maml_ppo_half_cheetah_dir.py
|
MIT
|
def maml_trpo_half_cheetah_dir(ctxt, seed, epochs, episodes_per_task,
meta_batch_size):
"""Set up environment and algorithm and run the task.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
episodes_per_task (int): Number of episodes per epoch per task for
training.
meta_batch_size (int): Number of tasks sampled per batch.
"""
set_seed(seed)
max_episode_length = 100
env = normalize(GymEnv(HalfCheetahDirEnv(),
max_episode_length=max_episode_length),
expected_action_scale=10.)
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=[32, 32],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
task_sampler = SetTaskSampler(
HalfCheetahDirEnv,
wrapper=lambda env, _: normalize(GymEnv(
env, max_episode_length=max_episode_length),
expected_action_scale=10.))
meta_evaluator = MetaEvaluator(test_task_sampler=task_sampler,
n_test_tasks=1,
n_test_episodes=10)
trainer = Trainer(ctxt)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length)
algo = MAMLTRPO(env=env,
policy=policy,
sampler=sampler,
task_sampler=task_sampler,
value_function=value_function,
meta_batch_size=meta_batch_size,
discount=0.99,
gae_lambda=1.,
inner_lr=0.1,
num_grad_updates=1,
meta_evaluator=meta_evaluator)
trainer.setup(algo, env)
trainer.train(n_epochs=epochs,
batch_size=episodes_per_task * env.spec.max_episode_length)
|
Set up environment and algorithm and run the task.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
episodes_per_task (int): Number of episodes per epoch per task for
training.
meta_batch_size (int): Number of tasks sampled per batch.
|
maml_trpo_half_cheetah_dir
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/maml_trpo_half_cheetah_dir.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/maml_trpo_half_cheetah_dir.py
|
MIT
|
def maml_trpo_metaworld_ml10(ctxt, seed, epochs, episodes_per_task,
meta_batch_size):
"""Set up environment and algorithm and run the task.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
episodes_per_task (int): Number of episodes per epoch per task
for training.
meta_batch_size (int): Number of tasks sampled per batch.
"""
set_seed(seed)
ml10 = metaworld.ML10()
tasks = MetaWorldTaskSampler(ml10, 'train')
env = tasks.sample(10)[0]()
test_sampler = SetTaskSampler(MetaWorldSetTaskEnv,
env=MetaWorldSetTaskEnv(ml10, 'test'))
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(100, 100),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
meta_evaluator = MetaEvaluator(test_task_sampler=test_sampler)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
n_workers=meta_batch_size)
trainer = Trainer(ctxt)
algo = MAMLTRPO(env=env,
policy=policy,
sampler=sampler,
task_sampler=tasks,
value_function=value_function,
meta_batch_size=meta_batch_size,
discount=0.99,
gae_lambda=1.,
inner_lr=0.1,
num_grad_updates=1,
meta_evaluator=meta_evaluator)
trainer.setup(algo, env)
trainer.train(n_epochs=epochs,
batch_size=episodes_per_task * env.spec.max_episode_length)
|
Set up environment and algorithm and run the task.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
episodes_per_task (int): Number of episodes per epoch per task
for training.
meta_batch_size (int): Number of tasks sampled per batch.
|
maml_trpo_metaworld_ml10
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/maml_trpo_metaworld_ml10.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/maml_trpo_metaworld_ml10.py
|
MIT
|
def maml_trpo_metaworld_ml1_push(ctxt, seed, epochs, rollouts_per_task,
meta_batch_size):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
rollouts_per_task (int): Number of rollouts per epoch per task
for training.
meta_batch_size (int): Number of tasks sampled per batch.
"""
set_seed(seed)
ml1 = metaworld.ML1('push-v1')
tasks = MetaWorldTaskSampler(ml1, 'train')
env = tasks.sample(1)[0]()
test_sampler = SetTaskSampler(MetaWorldSetTaskEnv,
env=MetaWorldSetTaskEnv(ml1, 'test'))
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(100, 100),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=[32, 32],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
meta_evaluator = MetaEvaluator(test_task_sampler=test_sampler,
n_test_tasks=1,
n_exploration_eps=rollouts_per_task)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
n_workers=meta_batch_size)
trainer = Trainer(ctxt)
algo = MAMLTRPO(env=env,
policy=policy,
sampler=sampler,
task_sampler=tasks,
value_function=value_function,
meta_batch_size=meta_batch_size,
discount=0.99,
gae_lambda=1.,
inner_lr=0.1,
num_grad_updates=1,
meta_evaluator=meta_evaluator)
trainer.setup(algo, env)
trainer.train(n_epochs=epochs,
batch_size=rollouts_per_task * env.spec.max_episode_length)
|
Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
rollouts_per_task (int): Number of rollouts per epoch per task
for training.
meta_batch_size (int): Number of tasks sampled per batch.
|
maml_trpo_metaworld_ml1_push
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/maml_trpo_metaworld_ml1_push.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/maml_trpo_metaworld_ml1_push.py
|
MIT
|
def maml_trpo_metaworld_ml45(ctxt, seed, epochs, episodes_per_task,
meta_batch_size):
"""Set up environment and algorithm and run the task.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
episodes_per_task (int): Number of episodes per epoch per task
for training.
meta_batch_size (int): Number of tasks sampled per batch.
"""
set_seed(seed)
ml45 = metaworld.ML45()
# pylint: disable=missing-return-doc,missing-return-type-doc
def wrap(env, _):
return normalize(env, expected_action_scale=10.0)
train_task_sampler = MetaWorldTaskSampler(ml45, 'train', wrap)
test_env = wrap(MetaWorldSetTaskEnv(ml45, 'test'), None)
test_task_sampler = SetTaskSampler(MetaWorldSetTaskEnv,
env=test_env,
wrapper=wrap)
env = train_task_sampler.sample(45)[0]()
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(100, 100),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
meta_evaluator = MetaEvaluator(test_task_sampler=test_task_sampler)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
n_workers=meta_batch_size)
trainer = Trainer(ctxt)
algo = MAMLTRPO(env=env,
task_sampler=train_task_sampler,
policy=policy,
sampler=sampler,
value_function=value_function,
meta_batch_size=meta_batch_size,
discount=0.99,
gae_lambda=1.,
inner_lr=0.1,
num_grad_updates=1,
meta_evaluator=meta_evaluator)
trainer.setup(algo, env)
trainer.train(n_epochs=epochs,
batch_size=episodes_per_task * env.spec.max_episode_length)
|
Set up environment and algorithm and run the task.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
episodes_per_task (int): Number of episodes per epoch per task
for training.
meta_batch_size (int): Number of tasks sampled per batch.
|
maml_trpo_metaworld_ml45
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/maml_trpo_metaworld_ml45.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/maml_trpo_metaworld_ml45.py
|
MIT
|
def maml_vpg_half_cheetah_dir(ctxt, seed, epochs, episodes_per_task,
meta_batch_size):
"""Set up environment and algorithm and run the task.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
episodes_per_task (int): Number of episodes per epoch per task
for training.
meta_batch_size (int): Number of tasks sampled per batch.
"""
set_seed(seed)
env = normalize(GymEnv(HalfCheetahDirEnv(), max_episode_length=100),
expected_action_scale=10.)
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
max_episode_length = env.spec.max_episode_length
task_sampler = SetTaskSampler(
HalfCheetahDirEnv,
wrapper=lambda env, _: normalize(GymEnv(
env, max_episode_length=max_episode_length),
expected_action_scale=10.))
meta_evaluator = MetaEvaluator(test_task_sampler=task_sampler,
n_test_tasks=1,
n_test_episodes=10)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length)
trainer = Trainer(ctxt)
algo = MAMLVPG(env=env,
policy=policy,
sampler=sampler,
task_sampler=task_sampler,
value_function=value_function,
meta_batch_size=meta_batch_size,
discount=0.99,
gae_lambda=1.,
inner_lr=0.1,
num_grad_updates=1,
meta_evaluator=meta_evaluator)
trainer.setup(algo, env)
trainer.train(n_epochs=epochs,
batch_size=episodes_per_task * max_episode_length)
|
Set up environment and algorithm and run the task.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
episodes_per_task (int): Number of episodes per epoch per task
for training.
meta_batch_size (int): Number of tasks sampled per batch.
|
maml_vpg_half_cheetah_dir
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/maml_vpg_half_cheetah_dir.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/maml_vpg_half_cheetah_dir.py
|
MIT
|
def mtppo_metaworld_mt10(ctxt, seed, epochs, batch_size, n_workers, n_tasks):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
batch_size (int): Number of environment steps in one batch.
n_workers (int): The number of workers the sampler should use.
n_tasks (int): Number of tasks to use. Should be a multiple of 10.
"""
set_seed(seed)
mt10 = metaworld.MT10()
train_task_sampler = MetaWorldTaskSampler(mt10,
'train',
lambda env, _: normalize(env),
add_env_onehot=True)
assert n_tasks % 10 == 0
assert n_tasks <= 500
envs = [env_up() for env_up in train_task_sampler.sample(n_tasks)]
env = MultiEnvWrapper(envs,
sample_strategy=round_robin_strategy,
mode='vanilla')
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
n_workers=n_workers)
algo = PPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
sampler=sampler,
discount=0.99,
gae_lambda=0.95,
center_adv=True,
lr_clip_range=0.2)
trainer = Trainer(ctxt)
trainer.setup(algo, env)
trainer.train(n_epochs=epochs, batch_size=batch_size)
|
Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
batch_size (int): Number of environment steps in one batch.
n_workers (int): The number of workers the sampler should use.
n_tasks (int): Number of tasks to use. Should be a multiple of 10.
|
mtppo_metaworld_mt10
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/mtppo_metaworld_mt10.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/mtppo_metaworld_mt10.py
|
MIT
|
def mtppo_metaworld_mt1_push(ctxt, seed, epochs, batch_size):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
batch_size (int): Number of environment steps in one batch.
"""
set_seed(seed)
n_tasks = 50
mt1 = metaworld.MT1('push-v1')
train_task_sampler = MetaWorldTaskSampler(mt1, 'train',
lambda env, _: normalize(env))
envs = [env_up() for env_up in train_task_sampler.sample(n_tasks)]
env = MultiEnvWrapper(envs,
sample_strategy=round_robin_strategy,
mode='vanilla')
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length)
algo = PPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
sampler=sampler,
discount=0.99,
gae_lambda=0.95,
center_adv=True,
lr_clip_range=0.2)
trainer = Trainer(ctxt)
trainer.setup(algo, env)
trainer.train(n_epochs=epochs, batch_size=batch_size)
|
Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
batch_size (int): Number of environment steps in one batch.
|
mtppo_metaworld_mt1_push
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/mtppo_metaworld_mt1_push.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/mtppo_metaworld_mt1_push.py
|
MIT
|
def mtppo_metaworld_mt50(ctxt, seed, epochs, batch_size, n_workers, n_tasks):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
batch_size (int): Number of environment steps in one batch.
n_workers (int): The number of workers the sampler should use.
n_tasks (int): Number of tasks to use. Should be a multiple of 50.
"""
set_seed(seed)
mt50 = metaworld.MT50()
train_task_sampler = MetaWorldTaskSampler(mt50,
'train',
lambda env, _: normalize(env),
add_env_onehot=True)
assert n_tasks % 50 == 0
assert n_tasks <= 2500
envs = [env_up() for env_up in train_task_sampler.sample(n_tasks)]
env = MultiEnvWrapper(envs,
sample_strategy=round_robin_strategy,
mode='vanilla')
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
n_workers=n_workers)
algo = PPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
sampler=sampler,
discount=0.99,
gae_lambda=0.95,
center_adv=True,
lr_clip_range=0.2)
trainer = Trainer(ctxt)
trainer.setup(algo, env)
trainer.train(n_epochs=epochs, batch_size=batch_size)
|
Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
batch_size (int): Number of environment steps in one batch.
n_workers (int): The number of workers the sampler should use.
n_tasks (int): Number of tasks to use. Should be a multiple of 50.
|
mtppo_metaworld_mt50
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/mtppo_metaworld_mt50.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/mtppo_metaworld_mt50.py
|
MIT
|
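add_env_onehot=True in the sampler above appends a one-hot task indicator to every observation so a single policy can tell the tasks apart. A minimal NumPy sketch of that kind of augmentation follows; the 9-dimensional observation and the helper name are assumptions made for illustration, not values taken from Meta-World or garage.

import numpy as np

def augment_with_task_onehot(obs, task_index, n_tasks):
    """Append a one-hot task id to a flat observation vector."""
    onehot = np.zeros(n_tasks, dtype=obs.dtype)
    onehot[task_index] = 1.0
    return np.concatenate([obs, onehot])

obs = np.random.uniform(size=9).astype(np.float32)   # assumed 9-dim observation
augmented = augment_with_task_onehot(obs, task_index=3, n_tasks=10)
assert augmented.shape == (19,)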
def mtsac_metaworld_mt10(ctxt=None, *, seed, _gpu, n_tasks, timesteps):
"""Train MTSAC with MT10 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
_gpu (int): The ID of the gpu to be used (used on multi-gpu machines).
n_tasks (int): Number of tasks to use. Should be a multiple of 10.
timesteps (int): Number of timesteps to run.
"""
deterministic.set_seed(seed)
trainer = Trainer(ctxt)
mt10 = metaworld.MT10() # pylint: disable=no-member
mt10_test = metaworld.MT10() # pylint: disable=no-member
# pylint: disable=missing-return-doc, missing-return-type-doc
def wrap(env, _):
return normalize(env, normalize_reward=True)
train_task_sampler = MetaWorldTaskSampler(mt10,
'train',
wrap,
add_env_onehot=True)
test_task_sampler = MetaWorldTaskSampler(mt10_test,
'train',
add_env_onehot=True)
assert n_tasks % 10 == 0
assert n_tasks <= 500
mt10_train_envs = train_task_sampler.sample(n_tasks)
env = mt10_train_envs[0]()
mt10_test_envs = [env_up() for env_up in test_task_sampler.sample(n_tasks)]
policy = TanhGaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=nn.ReLU,
output_nonlinearity=None,
min_std=np.exp(-20.),
max_std=np.exp(2.),
)
qf1 = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=F.relu)
qf2 = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=F.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6), )
meta_batch_size = 10
sampler = LocalSampler(
agents=policy,
envs=mt10_train_envs,
max_episode_length=env.spec.max_episode_length,
# 1 sampler worker for each environment
n_workers=meta_batch_size,
worker_class=FragmentWorker,
# increasing n_envs increases the vectorization of a sampler worker
# which improves runtime performance, but you will need to adjust this
# depending on your memory constraints. For reference, each worker by
# default uses n_envs=8. Each environment is approximately ~50mb large
# so creating 50 envs with 8 copies comes out to 20gb of memory. Many
# users want to be able to run multiple seeds on 1 machine, so I have
# reduced this to n_envs = 2 for 2 copies in the meantime.
worker_args=dict(n_envs=2))
batch_size = int(env.spec.max_episode_length * meta_batch_size)
num_evaluation_points = 500
epochs = timesteps // batch_size
epoch_cycles = epochs // num_evaluation_points
epochs = epochs // epoch_cycles
mtsac = MTSAC(policy=policy,
qf1=qf1,
qf2=qf2,
sampler=sampler,
gradient_steps_per_itr=env.spec.max_episode_length,
eval_env=mt10_test_envs,
env_spec=env.spec,
num_tasks=10,
steps_per_epoch=epoch_cycles,
replay_buffer=replay_buffer,
min_buffer_size=1500,
target_update_tau=5e-3,
discount=0.99,
buffer_batch_size=1280)
if _gpu is not None:
set_gpu_mode(True, _gpu)
mtsac.to()
trainer.setup(algo=mtsac, env=mt10_train_envs)
trainer.train(n_epochs=epochs, batch_size=batch_size)
|
Train MTSAC with MT10 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
_gpu (int): The ID of the gpu to be used (used on multi-gpu machines).
n_tasks (int): Number of tasks to use. Should be a multiple of 10.
timesteps (int): Number of timesteps to run.
|
mtsac_metaworld_mt10
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/mtsac_metaworld_mt10.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/mtsac_metaworld_mt10.py
|
MIT
|
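The epoch bookkeeping in the MTSAC examples is easy to misread, so here is a standalone arithmetic sketch of the same computation. The concrete numbers (max_episode_length=150, meta_batch_size=10, timesteps=20_000_000) are illustrative assumptions, not values taken from the file.

max_episode_length = 150      # assumed episode length
meta_batch_size = 10          # one sampler worker per task
timesteps = 20_000_000        # assumed total environment-step budget

batch_size = int(max_episode_length * meta_batch_size)   # env steps per sampler round
num_evaluation_points = 500                               # target number of evaluations
epochs = timesteps // batch_size                          # raw sampler rounds
epoch_cycles = epochs // num_evaluation_points            # rounds between evaluations
epochs = epochs // epoch_cycles                           # outer epochs actually run

print(batch_size, epoch_cycles, epochs)
# 1500 26 512 -> roughly num_evaluation_points evaluations over the full budget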
def mtsac_metaworld_mt1_pick_place(ctxt=None, *, seed, timesteps, _gpu):
"""Train MTSAC with the MT1 pick-place-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
_gpu (int): The ID of the gpu to be used (used on multi-gpu machines).
timesteps (int): Number of timesteps to run.
"""
deterministic.set_seed(seed)
mt1 = metaworld.MT1('pick-place-v1')
mt1_test = metaworld.MT1('pick-place-v1')
train_task_sampler = MetaWorldTaskSampler(mt1, 'train',
lambda env, _: normalize(env))
test_task_sampler = MetaWorldTaskSampler(mt1_test, 'train',
lambda env, _: normalize(env))
n_tasks = 50
train_envs = train_task_sampler.sample(n_tasks)
env = train_envs[0]()
test_envs = [env_up() for env_up in test_task_sampler.sample(n_tasks)]
trainer = Trainer(ctxt)
policy = TanhGaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=nn.ReLU,
output_nonlinearity=None,
min_std=np.exp(-20.),
max_std=np.exp(2.),
)
qf1 = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=F.relu)
qf2 = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=F.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6), )
sampler = LocalSampler(agents=policy,
envs=train_envs,
max_episode_length=env.spec.max_episode_length,
n_workers=n_tasks,
worker_class=FragmentWorker)
batch_size = int(env.spec.max_episode_length * n_tasks)
num_evaluation_points = 500
epochs = timesteps // batch_size
epoch_cycles = epochs // num_evaluation_points
epochs = epochs // epoch_cycles
mtsac = MTSAC(policy=policy,
qf1=qf1,
qf2=qf2,
sampler=sampler,
gradient_steps_per_itr=150,
eval_env=test_envs,
env_spec=env.spec,
num_tasks=1,
steps_per_epoch=epoch_cycles,
replay_buffer=replay_buffer,
min_buffer_size=1500,
target_update_tau=5e-3,
discount=0.99,
buffer_batch_size=1280)
if _gpu is not None:
set_gpu_mode(True, _gpu)
mtsac.to()
trainer.setup(algo=mtsac, env=train_envs)
trainer.train(n_epochs=epochs, batch_size=batch_size)
|
Train MTSAC with the MT1 pick-place-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
_gpu (int): The ID of the gpu to be used (used on multi-gpu machines).
timesteps (int): Number of timesteps to run.
|
mtsac_metaworld_mt1_pick_place
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/mtsac_metaworld_mt1_pick_place.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/mtsac_metaworld_mt1_pick_place.py
|
MIT
|
def mtsac_metaworld_mt50(ctxt=None,
*,
seed,
use_gpu,
_gpu,
n_tasks,
timesteps):
"""Train MTSAC with MT50 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
        use_gpu (bool): Used to enable usage of GPU in training.
_gpu (int): The ID of the gpu (used on multi-gpu machines).
n_tasks (int): Number of tasks to use. Should be a multiple of 50.
timesteps (int): Number of timesteps to run.
"""
deterministic.set_seed(seed)
trainer = Trainer(ctxt)
mt50 = metaworld.MT50() # pylint: disable=no-member
mt50_test = metaworld.MT50() # pylint: disable=no-member
train_task_sampler = MetaWorldTaskSampler(
mt50,
'train',
lambda env, _: normalize(env, normalize_reward=True),
add_env_onehot=True)
test_task_sampler = MetaWorldTaskSampler(mt50_test,
'train',
lambda env, _: normalize(env),
add_env_onehot=True)
assert n_tasks % 50 == 0
assert n_tasks <= 2500
mt50_train_envs = train_task_sampler.sample(n_tasks)
env = mt50_train_envs[0]()
mt50_test_envs = [env_up() for env_up in test_task_sampler.sample(n_tasks)]
policy = TanhGaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=nn.ReLU,
output_nonlinearity=None,
min_std=np.exp(-20.),
max_std=np.exp(2.),
)
qf1 = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=F.relu)
qf2 = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=F.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6), )
sampler = LocalSampler(
agents=policy,
envs=mt50_train_envs,
max_episode_length=env.spec.max_episode_length,
# 1 sampler worker for each environment
n_workers=50,
worker_class=FragmentWorker,
# increasing n_envs increases the vectorization of a sampler worker
# which improves runtime performance, but you will need to adjust this
# depending on your memory constraints. For reference, each worker by
# default uses n_envs=8. Each environment is approximately ~50mb large
# so creating 50 envs with 8 copies comes out to 20gb of memory. Many
# users want to be able to run multiple seeds on 1 machine, so I have
# reduced this to n_envs = 2 for 2 copies in the meantime.
worker_args=dict(n_envs=2))
batch_size = int(env.spec.max_episode_length * n_tasks)
num_evaluation_points = 500
epochs = timesteps // batch_size
epoch_cycles = epochs // num_evaluation_points
epochs = epochs // epoch_cycles
mtsac = MTSAC(policy=policy,
qf1=qf1,
qf2=qf2,
sampler=sampler,
gradient_steps_per_itr=env.spec.max_episode_length,
eval_env=mt50_test_envs,
env_spec=env.spec,
num_tasks=50,
steps_per_epoch=epoch_cycles,
replay_buffer=replay_buffer,
min_buffer_size=7500,
target_update_tau=5e-3,
discount=0.99,
buffer_batch_size=6400)
set_gpu_mode(use_gpu, _gpu)
mtsac.to()
trainer.setup(algo=mtsac, env=mt50_train_envs)
trainer.train(n_epochs=epochs, batch_size=batch_size)
|
Train MTSAC with MT50 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
    use_gpu (bool): Used to enable usage of GPU in training.
_gpu (int): The ID of the gpu (used on multi-gpu machines).
n_tasks (int): Number of tasks to use. Should be a multiple of 50.
timesteps (int): Number of timesteps to run.
|
mtsac_metaworld_mt50
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/mtsac_metaworld_mt50.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/mtsac_metaworld_mt50.py
|
MIT
|
def mttrpo_metaworld_mt10(ctxt, seed, epochs, batch_size, n_workers, n_tasks):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
batch_size (int): Number of environment steps in one batch.
n_workers (int): The number of workers the sampler should use.
n_tasks (int): Number of tasks to use. Should be a multiple of 10.
"""
set_seed(seed)
mt10 = metaworld.MT10()
train_task_sampler = MetaWorldTaskSampler(mt10,
'train',
lambda env, _: normalize(env),
add_env_onehot=True)
assert n_tasks % 10 == 0
assert n_tasks <= 500
envs = [env_up() for env_up in train_task_sampler.sample(n_tasks)]
env = MultiEnvWrapper(envs,
sample_strategy=round_robin_strategy,
mode='vanilla')
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
n_workers=n_workers)
algo = TRPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
sampler=sampler,
discount=0.99,
gae_lambda=0.95)
trainer = Trainer(ctxt)
trainer.setup(algo, env)
trainer.train(n_epochs=epochs, batch_size=batch_size)
|
Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
batch_size (int): Number of environment steps in one batch.
n_workers (int): The number of workers the sampler should use.
n_tasks (int): Number of tasks to use. Should be a multiple of 10.
|
mttrpo_metaworld_mt10
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/mttrpo_metaworld_mt10.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/mttrpo_metaworld_mt10.py
|
MIT
|
def mttrpo_metaworld_mt1_push(ctxt, seed, epochs, batch_size):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
batch_size (int): Number of environment steps in one batch.
"""
set_seed(seed)
n_tasks = 50
mt1 = metaworld.MT1('push-v1')
train_task_sampler = MetaWorldTaskSampler(mt1, 'train',
lambda env, _: normalize(env))
envs = [env_up() for env_up in train_task_sampler.sample(n_tasks)]
env = MultiEnvWrapper(envs,
sample_strategy=round_robin_strategy,
mode='vanilla')
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length)
algo = TRPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
sampler=sampler,
discount=0.99,
gae_lambda=0.95)
trainer = Trainer(ctxt)
trainer.setup(algo, env)
trainer.train(n_epochs=epochs, batch_size=batch_size)
|
Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
batch_size (int): Number of environment steps in one batch.
|
mttrpo_metaworld_mt1_push
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/mttrpo_metaworld_mt1_push.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/mttrpo_metaworld_mt1_push.py
|
MIT
|
def mttrpo_metaworld_mt50(ctxt, seed, epochs, batch_size, n_workers, n_tasks):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
batch_size (int): Number of environment steps in one batch.
n_workers (int): The number of workers the sampler should use.
n_tasks (int): Number of tasks to use. Should be a multiple of 50.
"""
set_seed(seed)
    mt50 = metaworld.MT50()
    train_task_sampler = MetaWorldTaskSampler(mt50,
'train',
lambda env, _: normalize(env),
add_env_onehot=True)
    assert n_tasks % 50 == 0
    assert n_tasks <= 2500
envs = [env_up() for env_up in train_task_sampler.sample(n_tasks)]
env = MultiEnvWrapper(envs,
sample_strategy=round_robin_strategy,
mode='vanilla')
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
n_workers=n_workers)
algo = TRPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
sampler=sampler,
discount=0.99,
gae_lambda=0.95)
trainer = Trainer(ctxt)
trainer.setup(algo, env)
trainer.train(n_epochs=epochs, batch_size=batch_size)
|
Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
batch_size (int): Number of environment steps in one batch.
n_workers (int): The number of workers the sampler should use.
n_tasks (int): Number of tasks to use. Should be a multiple of 50.
|
mttrpo_metaworld_mt50
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/mttrpo_metaworld_mt50.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/mttrpo_metaworld_mt50.py
|
MIT
|
def pearl_half_cheetah_vel(ctxt=None,
seed=1,
num_epochs=500,
num_train_tasks=100,
num_test_tasks=100,
latent_size=5,
encoder_hidden_size=200,
net_size=300,
meta_batch_size=16,
num_steps_per_epoch=2000,
num_initial_steps=2000,
num_tasks_sample=5,
num_steps_prior=400,
num_extra_rl_steps_posterior=600,
batch_size=256,
embedding_batch_size=100,
embedding_mini_batch_size=100,
max_episode_length=200,
reward_scale=5.,
use_gpu=False):
"""Train PEARL with HalfCheetahVel environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
num_epochs (int): Number of training epochs.
num_train_tasks (int): Number of tasks for training.
num_test_tasks (int): Number of tasks to use for testing.
latent_size (int): Size of latent context vector.
encoder_hidden_size (int): Output dimension of dense layer of the
context encoder.
net_size (int): Output dimension of a dense layer of Q-function and
value function.
meta_batch_size (int): Meta batch size.
num_steps_per_epoch (int): Number of iterations per epoch.
num_initial_steps (int): Number of transitions obtained per task before
training.
num_tasks_sample (int): Number of random tasks to obtain data for each
iteration.
num_steps_prior (int): Number of transitions to obtain per task with
z ~ prior.
num_extra_rl_steps_posterior (int): Number of additional transitions
to obtain per task with z ~ posterior that are only used to train
the policy and NOT the encoder.
batch_size (int): Number of transitions in RL batch.
embedding_batch_size (int): Number of transitions in context batch.
embedding_mini_batch_size (int): Number of transitions in mini context
batch; should be same as embedding_batch_size for non-recurrent
encoder.
max_episode_length (int): Maximum episode length.
        reward_scale (float): Reward scale.
use_gpu (bool): Whether or not to use GPU for training.
"""
set_seed(seed)
encoder_hidden_sizes = (encoder_hidden_size, encoder_hidden_size,
encoder_hidden_size)
# create multi-task environment and sample tasks
env_sampler = SetTaskSampler(
HalfCheetahVelEnv,
wrapper=lambda env, _: normalize(
GymEnv(env, max_episode_length=max_episode_length)))
env = env_sampler.sample(num_train_tasks)
test_env_sampler = SetTaskSampler(
HalfCheetahVelEnv,
wrapper=lambda env, _: normalize(
GymEnv(env, max_episode_length=max_episode_length)))
trainer = Trainer(ctxt)
# instantiate networks
augmented_env = PEARL.augment_env_spec(env[0](), latent_size)
qf = ContinuousMLPQFunction(env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
vf_env = PEARL.get_env_spec(env[0](), latent_size, 'vf')
vf = ContinuousMLPQFunction(env_spec=vf_env,
hidden_sizes=[net_size, net_size, net_size])
inner_policy = TanhGaussianMLPPolicy(
env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size])
sampler = LocalSampler(agents=None,
envs=env[0](),
max_episode_length=env[0]().spec.max_episode_length,
n_workers=1,
worker_class=PEARLWorker)
pearl = PEARL(
env=env,
policy_class=ContextConditionedPolicy,
encoder_class=MLPEncoder,
inner_policy=inner_policy,
qf=qf,
vf=vf,
sampler=sampler,
num_train_tasks=num_train_tasks,
num_test_tasks=num_test_tasks,
latent_dim=latent_size,
encoder_hidden_sizes=encoder_hidden_sizes,
test_env_sampler=test_env_sampler,
meta_batch_size=meta_batch_size,
num_steps_per_epoch=num_steps_per_epoch,
num_initial_steps=num_initial_steps,
num_tasks_sample=num_tasks_sample,
num_steps_prior=num_steps_prior,
num_extra_rl_steps_posterior=num_extra_rl_steps_posterior,
batch_size=batch_size,
embedding_batch_size=embedding_batch_size,
embedding_mini_batch_size=embedding_mini_batch_size,
reward_scale=reward_scale,
)
set_gpu_mode(use_gpu, gpu_id=0)
if use_gpu:
pearl.to()
trainer.setup(algo=pearl, env=env[0]())
trainer.train(n_epochs=num_epochs, batch_size=batch_size)
|
Train PEARL with HalfCheetahVel environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
num_epochs (int): Number of training epochs.
num_train_tasks (int): Number of tasks for training.
num_test_tasks (int): Number of tasks to use for testing.
latent_size (int): Size of latent context vector.
encoder_hidden_size (int): Output dimension of dense layer of the
context encoder.
net_size (int): Output dimension of a dense layer of Q-function and
value function.
meta_batch_size (int): Meta batch size.
num_steps_per_epoch (int): Number of iterations per epoch.
num_initial_steps (int): Number of transitions obtained per task before
training.
num_tasks_sample (int): Number of random tasks to obtain data for each
iteration.
num_steps_prior (int): Number of transitions to obtain per task with
z ~ prior.
num_extra_rl_steps_posterior (int): Number of additional transitions
to obtain per task with z ~ posterior that are only used to train
the policy and NOT the encoder.
batch_size (int): Number of transitions in RL batch.
embedding_batch_size (int): Number of transitions in context batch.
embedding_mini_batch_size (int): Number of transitions in mini context
batch; should be same as embedding_batch_size for non-recurrent
encoder.
max_episode_length (int): Maximum episode length.
    reward_scale (float): Reward scale.
use_gpu (bool): Whether or not to use GPU for training.
|
pearl_half_cheetah_vel
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/pearl_half_cheetah_vel.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/pearl_half_cheetah_vel.py
|
MIT
|
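PEARL's augment_env_spec widens the policy input so the sampled task context z can be concatenated to each observation before it reaches the inner policy. A minimal NumPy sketch of that concatenation; the 20-dimensional observation is an assumption for illustration, only latent_size mirrors the default above.

import numpy as np

latent_size = 5                                       # matches the latent_size default above
obs = np.random.randn(20).astype(np.float32)          # assumed 20-dim observation
z = np.random.randn(latent_size).astype(np.float32)   # sampled task context

policy_input = np.concatenate([obs, z])
assert policy_input.shape == (20 + latent_size,)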
def pearl_metaworld_ml10(ctxt=None,
seed=1,
num_epochs=1000,
num_train_tasks=10,
latent_size=7,
encoder_hidden_size=200,
net_size=300,
meta_batch_size=16,
num_steps_per_epoch=4000,
num_initial_steps=4000,
num_tasks_sample=15,
num_steps_prior=750,
num_extra_rl_steps_posterior=750,
batch_size=256,
embedding_batch_size=64,
embedding_mini_batch_size=64,
reward_scale=10.,
use_gpu=False):
"""Train PEARL with ML10 environments.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
num_epochs (int): Number of training epochs.
num_train_tasks (int): Number of tasks for training.
latent_size (int): Size of latent context vector.
encoder_hidden_size (int): Output dimension of dense layer of the
context encoder.
net_size (int): Output dimension of a dense layer of Q-function and
value function.
meta_batch_size (int): Meta batch size.
num_steps_per_epoch (int): Number of iterations per epoch.
num_initial_steps (int): Number of transitions obtained per task before
training.
num_tasks_sample (int): Number of random tasks to obtain data for each
iteration.
num_steps_prior (int): Number of transitions to obtain per task with
z ~ prior.
num_extra_rl_steps_posterior (int): Number of additional transitions
to obtain per task with z ~ posterior that are only used to train
the policy and NOT the encoder.
batch_size (int): Number of transitions in RL batch.
embedding_batch_size (int): Number of transitions in context batch.
embedding_mini_batch_size (int): Number of transitions in mini context
batch; should be same as embedding_batch_size for non-recurrent
encoder.
        reward_scale (float): Reward scale.
use_gpu (bool): Whether or not to use GPU for training.
"""
set_seed(seed)
encoder_hidden_sizes = (encoder_hidden_size, encoder_hidden_size,
encoder_hidden_size)
ml10 = metaworld.ML10()
train_env = MetaWorldSetTaskEnv(ml10, 'train')
env_sampler = SetTaskSampler(MetaWorldSetTaskEnv,
env=train_env,
wrapper=lambda env, _: normalize(env))
env = env_sampler.sample(num_train_tasks)
test_env = MetaWorldSetTaskEnv(ml10, 'test')
test_env_sampler = SetTaskSampler(MetaWorldSetTaskEnv,
env=test_env,
wrapper=lambda env, _: normalize(env))
trainer = Trainer(ctxt)
# instantiate networks
augmented_env = PEARL.augment_env_spec(env[0](), latent_size)
qf = ContinuousMLPQFunction(env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
vf_env = PEARL.get_env_spec(env[0](), latent_size, 'vf')
vf = ContinuousMLPQFunction(env_spec=vf_env,
hidden_sizes=[net_size, net_size, net_size])
inner_policy = TanhGaussianMLPPolicy(
env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size])
sampler = LocalSampler(agents=None,
envs=env[0](),
max_episode_length=env[0]().spec.max_episode_length,
n_workers=1,
worker_class=PEARLWorker)
pearl = PEARL(
env=env,
policy_class=ContextConditionedPolicy,
encoder_class=MLPEncoder,
inner_policy=inner_policy,
qf=qf,
vf=vf,
sampler=sampler,
num_train_tasks=num_train_tasks,
latent_dim=latent_size,
encoder_hidden_sizes=encoder_hidden_sizes,
test_env_sampler=test_env_sampler,
meta_batch_size=meta_batch_size,
num_steps_per_epoch=num_steps_per_epoch,
num_initial_steps=num_initial_steps,
num_tasks_sample=num_tasks_sample,
num_steps_prior=num_steps_prior,
num_extra_rl_steps_posterior=num_extra_rl_steps_posterior,
batch_size=batch_size,
embedding_batch_size=embedding_batch_size,
embedding_mini_batch_size=embedding_mini_batch_size,
reward_scale=reward_scale,
)
set_gpu_mode(use_gpu, gpu_id=0)
if use_gpu:
pearl.to()
trainer.setup(algo=pearl, env=env[0]())
trainer.train(n_epochs=num_epochs, batch_size=batch_size)
|
Train PEARL with ML10 environments.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
num_epochs (int): Number of training epochs.
num_train_tasks (int): Number of tasks for training.
latent_size (int): Size of latent context vector.
encoder_hidden_size (int): Output dimension of dense layer of the
context encoder.
net_size (int): Output dimension of a dense layer of Q-function and
value function.
meta_batch_size (int): Meta batch size.
num_steps_per_epoch (int): Number of iterations per epoch.
num_initial_steps (int): Number of transitions obtained per task before
training.
num_tasks_sample (int): Number of random tasks to obtain data for each
iteration.
num_steps_prior (int): Number of transitions to obtain per task with
z ~ prior.
num_extra_rl_steps_posterior (int): Number of additional transitions
to obtain per task with z ~ posterior that are only used to train
the policy and NOT the encoder.
batch_size (int): Number of transitions in RL batch.
embedding_batch_size (int): Number of transitions in context batch.
embedding_mini_batch_size (int): Number of transitions in mini context
batch; should be same as embedding_batch_size for non-recurrent
encoder.
    reward_scale (float): Reward scale.
use_gpu (bool): Whether or not to use GPU for training.
|
pearl_metaworld_ml10
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/pearl_metaworld_ml10.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/pearl_metaworld_ml10.py
|
MIT
|
def pearl_metaworld_ml1_push(ctxt=None,
seed=1,
num_epochs=1000,
num_train_tasks=50,
latent_size=7,
encoder_hidden_size=200,
net_size=300,
meta_batch_size=16,
num_steps_per_epoch=4000,
num_initial_steps=4000,
num_tasks_sample=15,
num_steps_prior=750,
num_extra_rl_steps_posterior=750,
batch_size=256,
embedding_batch_size=64,
embedding_mini_batch_size=64,
reward_scale=10.,
use_gpu=False):
"""Train PEARL with ML1 environments.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
num_epochs (int): Number of training epochs.
num_train_tasks (int): Number of tasks for training.
latent_size (int): Size of latent context vector.
encoder_hidden_size (int): Output dimension of dense layer of the
context encoder.
net_size (int): Output dimension of a dense layer of Q-function and
value function.
meta_batch_size (int): Meta batch size.
num_steps_per_epoch (int): Number of iterations per epoch.
num_initial_steps (int): Number of transitions obtained per task before
training.
num_tasks_sample (int): Number of random tasks to obtain data for each
iteration.
num_steps_prior (int): Number of transitions to obtain per task with
z ~ prior.
num_extra_rl_steps_posterior (int): Number of additional transitions
to obtain per task with z ~ posterior that are only used to train
the policy and NOT the encoder.
batch_size (int): Number of transitions in RL batch.
embedding_batch_size (int): Number of transitions in context batch.
embedding_mini_batch_size (int): Number of transitions in mini context
batch; should be same as embedding_batch_size for non-recurrent
encoder.
        reward_scale (float): Reward scale.
use_gpu (bool): Whether or not to use GPU for training.
"""
set_seed(seed)
encoder_hidden_sizes = (encoder_hidden_size, encoder_hidden_size,
encoder_hidden_size)
# create multi-task environment and sample tasks
ml1 = metaworld.ML1('push-v1')
train_env = MetaWorldSetTaskEnv(ml1, 'train')
env_sampler = SetTaskSampler(MetaWorldSetTaskEnv,
env=train_env,
wrapper=lambda env, _: normalize(env))
env = env_sampler.sample(num_train_tasks)
test_env = MetaWorldSetTaskEnv(ml1, 'test')
test_env_sampler = SetTaskSampler(MetaWorldSetTaskEnv,
env=test_env,
wrapper=lambda env, _: normalize(env))
trainer = Trainer(ctxt)
# instantiate networks
augmented_env = PEARL.augment_env_spec(env[0](), latent_size)
qf = ContinuousMLPQFunction(env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
vf_env = PEARL.get_env_spec(env[0](), latent_size, 'vf')
vf = ContinuousMLPQFunction(env_spec=vf_env,
hidden_sizes=[net_size, net_size, net_size])
inner_policy = TanhGaussianMLPPolicy(
env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size])
sampler = LocalSampler(agents=None,
envs=env[0](),
max_episode_length=env[0]().spec.max_episode_length,
n_workers=1,
worker_class=PEARLWorker)
pearl = PEARL(
env=env,
policy_class=ContextConditionedPolicy,
encoder_class=MLPEncoder,
inner_policy=inner_policy,
qf=qf,
vf=vf,
sampler=sampler,
num_train_tasks=num_train_tasks,
latent_dim=latent_size,
encoder_hidden_sizes=encoder_hidden_sizes,
test_env_sampler=test_env_sampler,
meta_batch_size=meta_batch_size,
num_steps_per_epoch=num_steps_per_epoch,
num_initial_steps=num_initial_steps,
num_tasks_sample=num_tasks_sample,
num_steps_prior=num_steps_prior,
num_extra_rl_steps_posterior=num_extra_rl_steps_posterior,
batch_size=batch_size,
embedding_batch_size=embedding_batch_size,
embedding_mini_batch_size=embedding_mini_batch_size,
reward_scale=reward_scale,
)
set_gpu_mode(use_gpu, gpu_id=0)
if use_gpu:
pearl.to()
trainer.setup(algo=pearl, env=env[0]())
trainer.train(n_epochs=num_epochs, batch_size=batch_size)
|
Train PEARL with ML1 environments.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
num_epochs (int): Number of training epochs.
num_train_tasks (int): Number of tasks for training.
latent_size (int): Size of latent context vector.
encoder_hidden_size (int): Output dimension of dense layer of the
context encoder.
net_size (int): Output dimension of a dense layer of Q-function and
value function.
meta_batch_size (int): Meta batch size.
num_steps_per_epoch (int): Number of iterations per epoch.
num_initial_steps (int): Number of transitions obtained per task before
training.
num_tasks_sample (int): Number of random tasks to obtain data for each
iteration.
num_steps_prior (int): Number of transitions to obtain per task with
z ~ prior.
num_extra_rl_steps_posterior (int): Number of additional transitions
to obtain per task with z ~ posterior that are only used to train
the policy and NOT the encoder.
batch_size (int): Number of transitions in RL batch.
embedding_batch_size (int): Number of transitions in context batch.
embedding_mini_batch_size (int): Number of transitions in mini context
batch; should be same as embedding_batch_size for non-recurrent
encoder.
    reward_scale (float): Reward scale.
use_gpu (bool): Whether or not to use GPU for training.
|
pearl_metaworld_ml1_push
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/pearl_metaworld_ml1_push.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/pearl_metaworld_ml1_push.py
|
MIT
|
def pearl_metaworld_ml45(ctxt=None,
seed=1,
num_epochs=1000,
num_train_tasks=45,
latent_size=7,
encoder_hidden_size=200,
net_size=300,
meta_batch_size=16,
num_steps_per_epoch=4000,
num_initial_steps=4000,
num_tasks_sample=15,
num_steps_prior=750,
num_extra_rl_steps_posterior=750,
batch_size=256,
embedding_batch_size=64,
embedding_mini_batch_size=64,
reward_scale=10.,
use_gpu=False):
"""Train PEARL with ML45 environments.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
num_epochs (int): Number of training epochs.
num_train_tasks (int): Number of tasks for training.
latent_size (int): Size of latent context vector.
encoder_hidden_size (int): Output dimension of dense layer of the
context encoder.
net_size (int): Output dimension of a dense layer of Q-function and
value function.
meta_batch_size (int): Meta batch size.
num_steps_per_epoch (int): Number of iterations per epoch.
num_initial_steps (int): Number of transitions obtained per task before
training.
num_tasks_sample (int): Number of random tasks to obtain data for each
iteration.
num_steps_prior (int): Number of transitions to obtain per task with
z ~ prior.
num_extra_rl_steps_posterior (int): Number of additional transitions
to obtain per task with z ~ posterior that are only used to train
the policy and NOT the encoder.
batch_size (int): Number of transitions in RL batch.
embedding_batch_size (int): Number of transitions in context batch.
embedding_mini_batch_size (int): Number of transitions in mini context
batch; should be same as embedding_batch_size for non-recurrent
encoder.
        reward_scale (float): Reward scale.
use_gpu (bool): Whether or not to use GPU for training.
"""
set_seed(seed)
encoder_hidden_sizes = (encoder_hidden_size, encoder_hidden_size,
encoder_hidden_size)
ml45 = metaworld.ML45()
train_env = MetaWorldSetTaskEnv(ml45, 'train')
env_sampler = SetTaskSampler(MetaWorldSetTaskEnv,
env=train_env,
wrapper=lambda env, _: normalize(env))
env = env_sampler.sample(num_train_tasks)
test_env = MetaWorldSetTaskEnv(ml45, 'test')
test_env_sampler = SetTaskSampler(MetaWorldSetTaskEnv,
env=test_env,
wrapper=lambda env, _: normalize(env))
trainer = Trainer(ctxt)
# instantiate networks
augmented_env = PEARL.augment_env_spec(env[0](), latent_size)
qf = ContinuousMLPQFunction(env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
vf_env = PEARL.get_env_spec(env[0](), latent_size, 'vf')
vf = ContinuousMLPQFunction(env_spec=vf_env,
hidden_sizes=[net_size, net_size, net_size])
inner_policy = TanhGaussianMLPPolicy(
env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size])
sampler = LocalSampler(agents=None,
envs=env[0](),
max_episode_length=env[0]().spec.max_episode_length,
n_workers=1,
worker_class=PEARLWorker)
pearl = PEARL(
env=env,
policy_class=ContextConditionedPolicy,
encoder_class=MLPEncoder,
inner_policy=inner_policy,
qf=qf,
vf=vf,
sampler=sampler,
num_train_tasks=num_train_tasks,
latent_dim=latent_size,
encoder_hidden_sizes=encoder_hidden_sizes,
test_env_sampler=test_env_sampler,
meta_batch_size=meta_batch_size,
num_steps_per_epoch=num_steps_per_epoch,
num_initial_steps=num_initial_steps,
num_tasks_sample=num_tasks_sample,
num_steps_prior=num_steps_prior,
num_extra_rl_steps_posterior=num_extra_rl_steps_posterior,
batch_size=batch_size,
embedding_batch_size=embedding_batch_size,
embedding_mini_batch_size=embedding_mini_batch_size,
reward_scale=reward_scale,
)
set_gpu_mode(use_gpu, gpu_id=0)
if use_gpu:
pearl.to()
trainer.setup(algo=pearl, env=env[0]())
trainer.train(n_epochs=num_epochs, batch_size=batch_size)
|
Train PEARL with ML45 environments.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
num_epochs (int): Number of training epochs.
num_train_tasks (int): Number of tasks for training.
latent_size (int): Size of latent context vector.
encoder_hidden_size (int): Output dimension of dense layer of the
context encoder.
net_size (int): Output dimension of a dense layer of Q-function and
value function.
meta_batch_size (int): Meta batch size.
num_steps_per_epoch (int): Number of iterations per epoch.
num_initial_steps (int): Number of transitions obtained per task before
training.
num_tasks_sample (int): Number of random tasks to obtain data for each
iteration.
num_steps_prior (int): Number of transitions to obtain per task with
z ~ prior.
num_extra_rl_steps_posterior (int): Number of additional transitions
to obtain per task with z ~ posterior that are only used to train
the policy and NOT the encoder.
batch_size (int): Number of transitions in RL batch.
embedding_batch_size (int): Number of transitions in context batch.
embedding_mini_batch_size (int): Number of transitions in mini context
batch; should be same as embedding_batch_size for non-recurrent
encoder.
    reward_scale (float): Reward scale.
use_gpu (bool): Whether or not to use GPU for training.
|
pearl_metaworld_ml45
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/pearl_metaworld_ml45.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/pearl_metaworld_ml45.py
|
MIT
|
def ppo_pendulum(ctxt=None, seed=1):
"""Train PPO with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
env = GymEnv('InvertedDoublePendulum-v2')
trainer = Trainer(ctxt)
policy = GaussianMLPPolicy(env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length)
algo = PPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
sampler=sampler,
discount=0.99,
center_adv=False)
trainer.setup(algo, env)
trainer.train(n_epochs=100, batch_size=10000)
|
Train PPO with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
ppo_pendulum
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/ppo_pendulum.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/ppo_pendulum.py
|
MIT
|
def sac_half_cheetah_batch(ctxt=None, seed=1):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
deterministic.set_seed(seed)
trainer = Trainer(snapshot_config=ctxt)
env = normalize(GymEnv('HalfCheetah-v2'))
policy = TanhGaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=[256, 256],
hidden_nonlinearity=nn.ReLU,
output_nonlinearity=None,
min_std=np.exp(-20.),
max_std=np.exp(2.),
)
qf1 = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[256, 256],
hidden_nonlinearity=F.relu)
qf2 = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[256, 256],
hidden_nonlinearity=F.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))
sampler = LocalSampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
worker_class=FragmentWorker)
sac = SAC(env_spec=env.spec,
policy=policy,
qf1=qf1,
qf2=qf2,
sampler=sampler,
gradient_steps_per_itr=1000,
max_episode_length_eval=1000,
replay_buffer=replay_buffer,
min_buffer_size=1e4,
target_update_tau=5e-3,
discount=0.99,
buffer_batch_size=256,
reward_scale=1.,
steps_per_epoch=1)
if torch.cuda.is_available():
set_gpu_mode(True)
else:
set_gpu_mode(False)
sac.to()
trainer.setup(algo=sac, env=env)
trainer.train(n_epochs=1000, batch_size=1000)
|
Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
sac_half_cheetah_batch
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/sac_half_cheetah_batch.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/sac_half_cheetah_batch.py
|
MIT
|
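A rough budget check for the SAC example above, ignoring the warm-up transitions gathered before min_buffer_size is reached: with these settings it collects about one million environment steps and performs roughly one gradient update per environment step.

n_epochs = 1000
batch_size = 1000                 # env steps per trainer.train iteration
gradient_steps_per_itr = 1000

total_env_steps = n_epochs * batch_size                  # 1_000_000
total_grad_steps = n_epochs * gradient_steps_per_itr     # 1_000_000
assert total_grad_steps == total_env_steps               # ~1 update per env step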
def td3_half_cheetah(ctxt=None, seed=1):
"""Train TD3 with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
n_epochs = 500
steps_per_epoch = 20
sampler_batch_size = 250
num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size
trainer = Trainer(ctxt)
env = normalize(GymEnv('HalfCheetah-v2'))
policy = DeterministicMLPPolicy(env_spec=env.spec,
hidden_sizes=[256, 256],
hidden_nonlinearity=F.relu,
output_nonlinearity=torch.tanh)
exploration_policy = AddGaussianNoise(env.spec,
policy,
total_timesteps=num_timesteps,
max_sigma=0.1,
min_sigma=0.1)
uniform_random_policy = UniformRandomPolicy(env.spec)
qf1 = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[256, 256],
hidden_nonlinearity=F.relu)
qf2 = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[256, 256],
hidden_nonlinearity=F.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))
sampler = LocalSampler(agents=exploration_policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
worker_class=FragmentWorker)
td3 = TD3(env_spec=env.spec,
policy=policy,
qf1=qf1,
qf2=qf2,
replay_buffer=replay_buffer,
sampler=sampler,
policy_optimizer=torch.optim.Adam,
qf_optimizer=torch.optim.Adam,
exploration_policy=exploration_policy,
uniform_random_policy=uniform_random_policy,
target_update_tau=0.005,
discount=0.99,
policy_noise_clip=0.5,
policy_noise=0.2,
policy_lr=1e-3,
qf_lr=1e-3,
              steps_per_epoch=steps_per_epoch,
start_steps=1000,
grad_steps_per_env_step=50,
min_buffer_size=1000,
buffer_batch_size=100)
trainer.setup(algo=td3, env=env)
    trainer.train(n_epochs=n_epochs, batch_size=sampler_batch_size)
|
Train TD3 with HalfCheetah-v2 environment.
Args:
    ctxt (garage.experiment.ExperimentContext): The experiment
        configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
td3_half_cheetah
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/td3_halfcheetah.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/td3_halfcheetah.py
|
MIT
|
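AddGaussianNoise in the TD3 examples perturbs the deterministic policy's action during exploration; with max_sigma == min_sigma == 0.1 the noise scale stays constant over training. A simplified stand-in follows; the [-1, 1] action bounds are an assumption for illustration, not values read from the environment spec.

import numpy as np

def add_gaussian_noise(action, sigma=0.1, low=-1.0, high=1.0):
    """Perturb a deterministic action with Gaussian noise and clip to bounds."""
    noisy = action + np.random.normal(scale=sigma, size=np.shape(action))
    return np.clip(noisy, low, high)

action = np.array([0.95, -0.2])
print(add_gaussian_noise(action))   # e.g. [ 0.99 -0.12] (random)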
def td3_pendulum(ctxt=None, seed=1):
"""Train TD3 with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
n_epochs = 750
steps_per_epoch = 40
sampler_batch_size = 100
num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size
trainer = Trainer(ctxt)
env = normalize(GymEnv('InvertedDoublePendulum-v2'))
policy = DeterministicMLPPolicy(env_spec=env.spec,
hidden_sizes=[256, 256],
hidden_nonlinearity=F.relu,
output_nonlinearity=torch.tanh)
exploration_policy = AddGaussianNoise(env.spec,
policy,
total_timesteps=num_timesteps,
max_sigma=0.1,
min_sigma=0.1)
uniform_random_policy = UniformRandomPolicy(env.spec)
qf1 = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[256, 256],
hidden_nonlinearity=F.relu)
qf2 = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[256, 256],
hidden_nonlinearity=F.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))
sampler = LocalSampler(agents=exploration_policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
worker_class=FragmentWorker)
td3 = TD3(env_spec=env.spec,
policy=policy,
qf1=qf1,
qf2=qf2,
replay_buffer=replay_buffer,
sampler=sampler,
policy_optimizer=torch.optim.Adam,
qf_optimizer=torch.optim.Adam,
exploration_policy=exploration_policy,
uniform_random_policy=uniform_random_policy,
target_update_tau=0.005,
discount=0.99,
policy_noise_clip=0.5,
policy_noise=0.2,
policy_lr=1e-3,
qf_lr=1e-3,
steps_per_epoch=steps_per_epoch,
start_steps=1000,
grad_steps_per_env_step=1,
min_buffer_size=int(1e4),
buffer_batch_size=100)
prefer_gpu()
td3.to()
trainer.setup(algo=td3, env=env)
trainer.train(n_epochs=n_epochs, batch_size=sampler_batch_size)
|
Train TD3 with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
        configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
td3_pendulum
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/td3_pendulum.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/td3_pendulum.py
|
MIT
|
def trpo_pendulum(ctxt=None, seed=1):
"""Train TRPO with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
env = GymEnv('InvertedDoublePendulum-v2')
trainer = Trainer(ctxt)
policy = GaussianMLPPolicy(env.spec,
hidden_sizes=[32, 32],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
sampler = LocalSampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length)
algo = TRPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
sampler=sampler,
discount=0.99,
center_adv=False)
trainer.setup(algo, env)
trainer.train(n_epochs=100, batch_size=1024)
|
Train TRPO with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
trpo_pendulum
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/trpo_pendulum.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/trpo_pendulum.py
|
MIT
|
def trpo_pendulum_ray_sampler(ctxt=None, seed=1):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
# Since this is an example, we are running ray in a reduced state.
# One can comment this line out in order to run ray at full capacity
ray.init(_memory=52428800,
object_store_memory=78643200,
ignore_reinit_error=True,
log_to_driver=False,
include_dashboard=False)
deterministic.set_seed(seed)
env = GymEnv('InvertedDoublePendulum-v2')
trainer = Trainer(ctxt)
policy = GaussianMLPPolicy(env.spec,
hidden_sizes=[32, 32],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length)
algo = TRPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
sampler=sampler,
discount=0.99,
center_adv=False)
trainer.setup(algo, env)
trainer.train(n_epochs=100, batch_size=1024)
|
Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
trpo_pendulum_ray_sampler
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/trpo_pendulum_ray_sampler.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/trpo_pendulum_ray_sampler.py
|
MIT
|
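The byte counts passed to ray.init above are easier to read in mebibytes; this tiny check shows they correspond to a 50 MiB heap and a 75 MiB object store.

print(52428800 / 2**20)   # 50.0  -> _memory
print(78643200 / 2**20)   # 75.0  -> object_store_memory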
def train(self, trainer):
"""Obtain samplers and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer.
"""
for epoch in trainer.step_epochs():
samples = trainer.obtain_samples(epoch)
log_performance(epoch,
EpisodeBatch.from_list(self.env_spec, samples),
self._discount)
self._train_once(samples)
|
Obtain samples and start actual training for each epoch.
Args:
trainer (Trainer): Experiment trainer.
|
train
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/tutorial_vpg.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/tutorial_vpg.py
|
MIT
|
def _train_once(self, samples):
"""Perform one step of policy optimization given one batch of samples.
Args:
samples (list[dict]): A list of collected paths.
Returns:
numpy.float64: Average return.
"""
losses = []
self._policy_opt.zero_grad()
for path in samples:
returns_numpy = discount_cumsum(path['rewards'], self._discount)
returns = torch.Tensor(returns_numpy.copy())
obs = torch.Tensor(path['observations'])
actions = torch.Tensor(path['actions'])
dist = self.policy(obs)[0]
log_likelihoods = dist.log_prob(actions)
loss = (-log_likelihoods * returns).mean()
loss.backward()
losses.append(loss.item())
self._policy_opt.step()
return np.mean(losses)
|
Perform one step of policy optimization given one batch of samples.
Args:
samples (list[dict]): A list of collected paths.
Returns:
numpy.float64: Average return.
|
_train_once
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/tutorial_vpg.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/tutorial_vpg.py
|
MIT
|
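discount_cumsum in _train_once produces the reward-to-go at every timestep, R_t = r_t + gamma * R_{t+1}. Below is a small reference implementation with the same semantics; it is a sketch for clarity, not the helper the repo actually imports.

import numpy as np

def discount_cumsum(rewards, discount):
    """Return the discounted reward-to-go at each timestep."""
    returns = np.zeros(len(rewards))
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + discount * running
        returns[t] = running
    return returns

assert np.allclose(discount_cumsum([1.0, 1.0, 1.0], 0.5), [1.75, 1.5, 1.0])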
def tutorial_vpg(ctxt=None):
"""Train VPG with PointEnv environment.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
"""
set_seed(100)
trainer = Trainer(ctxt)
env = PointEnv()
policy = GaussianMLPPolicy(env.spec)
sampler = LocalSampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length)
algo = SimpleVPG(env.spec, policy, sampler)
trainer.setup(algo, env)
trainer.train(n_epochs=200, batch_size=4000)
|
Train VPG with PointEnv environment.
Args:
ctxt (ExperimentContext): The experiment configuration used by
:class:`~Trainer` to create the :class:`~Snapshotter`.
|
tutorial_vpg
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/tutorial_vpg.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/tutorial_vpg.py
|
MIT
|
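The train, _train_once and tutorial_vpg records above all come from the same tutorial file. A condensed skeleton of how such a class typically hangs together is shown below; the attribute names follow the snippets above, while the Adam optimizer and learning rate are assumptions rather than values from the tutorial.

import torch

class SimpleVPGSketch:
    """Minimal vanilla-policy-gradient skeleton mirroring the tutorial snippets."""

    def __init__(self, env_spec, policy, sampler, discount=0.99):
        self.env_spec = env_spec
        self.policy = policy
        self._sampler = sampler
        self._discount = discount
        self._policy_opt = torch.optim.Adam(policy.parameters(), lr=1e-2)

    def train(self, trainer):
        # One sampling round plus one policy update per epoch.
        for epoch in trainer.step_epochs():
            samples = trainer.obtain_samples(epoch)
            self._train_once(samples)

    def _train_once(self, samples):
        # REINFORCE update over the collected paths; see the _train_once record above.
        raise NotImplementedError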
def vpg_pendulum(ctxt=None, seed=1):
"""Train PPO with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
env = GymEnv('InvertedDoublePendulum-v2')
trainer = Trainer(ctxt)
policy = GaussianMLPPolicy(env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
sampler = RaySampler(agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length)
algo = VPG(env_spec=env.spec,
policy=policy,
value_function=value_function,
sampler=sampler,
discount=0.99,
center_adv=False)
trainer.setup(algo, env)
trainer.train(n_epochs=100, batch_size=10000)
|
Train VPG with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
|
vpg_pendulum
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/vpg_pendulum.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/vpg_pendulum.py
|
MIT
|
def watch_atari(saved_dir, env=None, num_episodes=10):
"""Watch a trained agent play an atari game.
Args:
saved_dir (str): Directory containing the pickle file.
env (str): Environment to run episodes on. If None, the pickled
environment is used.
num_episodes (int): Number of episodes to play. Note that when using
the EpisodicLife wrapper, an episode is considered done when a
life is lost. Defaults to 10.
"""
snapshotter = Snapshotter()
data = snapshotter.load(saved_dir)
if env is not None:
env = gym.make(env)
env = Noop(env, noop_max=30)
env = MaxAndSkip(env, skip=4)
env = EpisodicLife(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireReset(env)
env = Grayscale(env)
env = Resize(env, 84, 84)
env = ClipReward(env)
env = StackFrames(env, 4, axis=0)
env = GymEnv(env)
else:
env = data['env']
exploration_policy = data['algo'].exploration_policy
exploration_policy.policy._qf.to('cpu')
ep_rewards = np.asarray([])
for _ in range(num_episodes):
episode_data = rollout(env,
exploration_policy.policy,
animated=True,
pause_per_frame=0.02)
ep_rewards = np.append(ep_rewards, np.sum(episode_data['rewards']))
print('Average Reward {}'.format(np.mean(ep_rewards)))
|
Watch a trained agent play an atari game.
Args:
saved_dir (str): Directory containing the pickle file.
env (str): Environment to run episodes on. If None, the pickled
environment is used.
num_episodes (int): Number of episodes to play. Note that when using
the EpisodicLife wrapper, an episode is considered done when a
life is lost. Defaults to 10.
|
watch_atari
|
python
|
rlworkgroup/garage
|
src/garage/examples/torch/watch_atari.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/examples/torch/watch_atari.py
|
MIT
|
def set_seed(seed):
"""Set the process-wide random seed.
Args:
seed (int): A positive integer
"""
seed %= 4294967294
# pylint: disable=global-statement
global seed_
global seed_stream_
seed_ = seed
random.seed(seed)
np.random.seed(seed)
if 'tensorflow' in sys.modules:
import tensorflow as tf # pylint: disable=import-outside-toplevel
tf.compat.v1.set_random_seed(seed)
try:
# pylint: disable=import-outside-toplevel
import tensorflow_probability as tfp
seed_stream_ = tfp.util.SeedStream(seed_, salt='garage')
except ImportError:
pass
if 'torch' in sys.modules:
warnings.warn(
            'Enabling deterministic mode in PyTorch can have a performance '
'impact when using GPU.')
import torch # pylint: disable=import-outside-toplevel
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
|
Set the process-wide random seed.
Args:
seed (int): A positive integer
|
set_seed
|
python
|
rlworkgroup/garage
|
src/garage/experiment/deterministic.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/deterministic.py
|
MIT
|
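A quick reproducibility check using set_seed from the module above (the examples earlier call it as deterministic.set_seed): seeding twice with the same value replays the same NumPy stream.

import numpy as np
from garage.experiment import deterministic

deterministic.set_seed(42)
first = np.random.uniform(size=3)
deterministic.set_seed(42)
second = np.random.uniform(size=3)
assert np.allclose(first, second)   # identical draws after re-seeding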
def get_tf_seed_stream():
"""Get the pseudo-random number generator (PRNG) for TensorFlow ops.
Returns:
int: A seed generated by a PRNG with fixed global seed.
"""
if seed_stream_ is None:
set_seed(0)
return seed_stream_() % 4294967294
|
Get the pseudo-random number generator (PRNG) for TensorFlow ops.
Returns:
int: A seed generated by a PRNG with fixed global seed.
|
get_tf_seed_stream
|
python
|
rlworkgroup/garage
|
src/garage/experiment/deterministic.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/deterministic.py
|
MIT
|
def _make_sequential_log_dir(log_dir):
"""Creates log_dir, appending a number if necessary.
Attempts to create the directory `log_dir`. If it already exists, appends
"_1". If that already exists, appends "_2" instead, etc.
Args:
log_dir (str): The log directory to attempt to create.
Returns:
str: The log directory actually created.
"""
i = 0
while True:
try:
if i == 0:
os.makedirs(log_dir)
else:
possible_log_dir = '{}_{}'.format(log_dir, i)
os.makedirs(possible_log_dir)
log_dir = possible_log_dir
return log_dir
except FileExistsError:
i += 1
|
Creates log_dir, appending a number if necessary.
Attempts to create the directory `log_dir`. If it already exists, appends
"_1". If that already exists, appends "_2" instead, etc.
Args:
log_dir (str): The log directory to attempt to create.
Returns:
str: The log directory actually created.
|
_make_sequential_log_dir
|
python
|
rlworkgroup/garage
|
src/garage/experiment/experiment.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/experiment.py
|
MIT
|
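A behavioral sketch of _make_sequential_log_dir; it is a private helper, so
the import below is for illustration only. Repeated calls with the same name
yield numbered sibling directories.

import os
import tempfile

from garage.experiment.experiment import _make_sequential_log_dir

base = os.path.join(tempfile.mkdtemp(), 'my_exp')
print(_make_sequential_log_dir(base))  # .../my_exp
print(_make_sequential_log_dir(base))  # .../my_exp_1
print(_make_sequential_log_dir(base))  # .../my_exp_2
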
def _make_experiment_signature(function):
"""Generate an ExperimentTemplate's signature from its function.
Checks that the first parameter is named ctxt and removes it from the
signature. Makes all other parameters keyword only.
Args:
function (callable[ExperimentContext, ...]): The wrapped function.
Returns:
inspect.Signature: The signature of the ExperimentTemplate.
Raises:
ValueError: If the wrapped function's first parameter is not 'ctxt'.
"""
func_sig = inspect.signature(function)
new_params = []
saw_first_param = False
for param in func_sig.parameters.values():
if not saw_first_param:
# Don't output it to the experiment params, since it will contain
# the context.
if param.name != 'ctxt':
raise ValueError(
'Experiment functions should have a first '
"parameter named 'ctxt' instead of {!r}".format(
param.name))
saw_first_param = True
else:
new_params.append(
inspect.Parameter(name=param.name,
kind=inspect.Parameter.KEYWORD_ONLY,
default=param.default,
annotation=param.annotation))
if not saw_first_param:
raise ValueError(
'Experiment functions should have a first parameter '
"named 'ctxt', but {!r} has no parameters".format(function))
return inspect.Signature(new_params,
return_annotation=func_sig.return_annotation)
|
Generate an ExperimentTemplate's signature from its function.
Checks that the first parameter is named ctxt and removes it from the
signature. Makes all other parameters keyword only.
Args:
function (callable[ExperimentContext, ...]): The wrapped function.
Returns:
inspect.Signature: The signature of the ExperimentTemplate.
Raises:
ValueError: If the wrapped function's first parameter is not 'ctxt'.
|
_make_experiment_signature
|
python
|
rlworkgroup/garage
|
src/garage/experiment/experiment.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/experiment.py
|
MIT
|
def _update_wrap_params(self):
"""Update self to "look like" the wrapped funciton.
Mostly, this involves creating a function signature for the
ExperimentTemplate that looks like the wrapped function, but with the
first argument (ctxt) excluded, and all other arguments required to be
keyword only.
"""
functools.update_wrapper(self, self.function)
self.__signature__ = _make_experiment_signature(self.function)
|
Update self to "look like" the wrapped funciton.
Mostly, this involves creating a function signature for the
ExperimentTemplate that looks like the wrapped function, but with the
first argument (ctxt) excluded, and all other arguments required to be
keyword only.
|
_update_wrap_params
|
python
|
rlworkgroup/garage
|
src/garage/experiment/experiment.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/experiment.py
|
MIT
|
def _augment_name(cls, options, name, params):
"""Augment the experiment name with parameters.
Args:
options (dict): Options to `wrap_experiment` itself. See the
function documentation for details.
name (str): Name without parameter names.
params (dict): Dictionary of parameters.
Raises:
ValueError: If self.name_parameters is not set to None, "passed",
or "all".
Returns:
str: Returns the augmented name.
"""
name_parameters = collections.OrderedDict()
if options['name_parameters'] == 'passed':
for param in options['signature'].parameters.values():
try:
name_parameters[param.name] = params[param.name]
except KeyError:
pass
elif options['name_parameters'] == 'all':
for param in options['signature'].parameters.values():
name_parameters[param.name] = params.get(
param.name, param.default)
elif options['name_parameters'] is not None:
raise ValueError('wrap_experiment.name_parameters should be set '
'to one of None, "passed", or "all"')
param_str = '_'.join('{}={}'.format(k, v)
for (k, v) in name_parameters.items())
if param_str:
return '{}_{}'.format(name, param_str)
else:
return name
|
Augment the experiment name with parameters.
Args:
options (dict): Options to `wrap_experiment` itself. See the
function documentation for details.
name (str): Name without parameter names.
params (dict): Dictionary of parameters.
Raises:
ValueError: If self.name_parameters is not set to None, "passed",
or "all".
Returns:
str: Returns the augmented name.
|
_augment_name
|
python
|
rlworkgroup/garage
|
src/garage/experiment/experiment.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/experiment.py
|
MIT
|
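A sketch of how _augment_name composes an experiment name, assuming it is
reachable as a classmethod on the ExperimentTemplate class in the same module
and that the options dict only needs the keys the method reads.

import inspect

from garage.experiment.experiment import ExperimentTemplate

def my_exp(ctxt, seed, batch_size=32):
    """Hypothetical experiment function."""

options = {
    'name_parameters': 'passed',  # only append parameters actually passed
    'signature': inspect.signature(my_exp),
}
# Only 'seed' was passed, so only it is appended to the base name.
print(ExperimentTemplate._augment_name(options, 'my_exp', {'seed': 1}))
# -> my_exp_seed=1
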
def _get_options(self, *args):
"""Get the options for wrap_experiment.
This method combines options passed to `wrap_experiment` itself and to
the wrapped experiment.
Args:
args (list[dict]): Unnamed arguments to the wrapped experiment. May
be an empty list or a list containing a single dictionary.
Raises:
ValueError: If args contains more than one value, or the value is
not a dictionary containing at most the same keys as are
arguments to `wrap_experiment`.
Returns:
dict: The final options.
"""
options = dict(name=self.name,
function=self.function,
prefix=self.prefix,
name_parameters=self.name_parameters,
log_dir=self.log_dir,
archive_launch_repo=self.archive_launch_repo,
snapshot_gap=self.snapshot_gap,
snapshot_mode=self.snapshot_mode,
use_existing_dir=self.use_existing_dir,
x_axis=self.x_axis,
signature=self.__signature__)
if args:
if len(args) == 1 and isinstance(args[0], dict):
for k in args[0]:
if k not in options:
raise ValueError('Unknown key {} in wrap_experiment '
'options'.format(k))
options.update(args[0])
else:
raise ValueError('garage.experiment currently only supports '
'keyword arguments')
return options
|
Get the options for wrap_experiment.
This method combines options passed to `wrap_experiment` itself and to
the wrapped experiment.
Args:
args (list[dict]): Unnamed arguments to the wrapped experiment. May
be an empty list or a list containing a single dictionary.
Raises:
ValueError: If args contains more than one value, or the value is
not a dictionary containing at most the same keys as are
arguments to `wrap_experiment`.
Returns:
dict: The final options.
|
_get_options
|
python
|
rlworkgroup/garage
|
src/garage/experiment/experiment.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/experiment.py
|
MIT
|
def _make_context(cls, options, **kwargs):
"""Make a context from the template information and variant args.
Currently, all arguments should be keyword arguments.
Args:
options (dict): Options to `wrap_experiment` itself. See the
function documentation for details.
kwargs (dict): Keyword arguments for the wrapped function. Will be
logged to `variant.json`
Returns:
ExperimentContext: The created experiment context.
"""
name = options['name']
if name is None:
name = options['function'].__name__
name = cls._augment_name(options, name, kwargs)
log_dir = options['log_dir']
if log_dir is None:
log_dir = ('{data}/local/{prefix}/{name}'.format(
data=os.path.join(os.getcwd(), 'data'),
prefix=options['prefix'],
name=name))
if options['use_existing_dir']:
os.makedirs(log_dir, exist_ok=True)
else:
log_dir = _make_sequential_log_dir(log_dir)
tabular_log_file = os.path.join(log_dir, 'progress.csv')
text_log_file = os.path.join(log_dir, 'debug.log')
variant_log_file = os.path.join(log_dir, 'variant.json')
metadata_log_file = os.path.join(log_dir, 'metadata.json')
dump_json(variant_log_file, kwargs)
git_root_path, metadata = get_metadata()
dump_json(metadata_log_file, metadata)
if git_root_path and options['archive_launch_repo']:
make_launcher_archive(git_root_path=git_root_path, log_dir=log_dir)
logger.add_output(dowel.TextOutput(text_log_file))
logger.add_output(dowel.CsvOutput(tabular_log_file))
logger.add_output(
dowel.TensorBoardOutput(log_dir, x_axis=options['x_axis']))
logger.add_output(dowel.StdOutput())
logger.push_prefix('[{}] '.format(name))
logger.log('Logging to {}'.format(log_dir))
return ExperimentContext(snapshot_dir=log_dir,
snapshot_mode=options['snapshot_mode'],
snapshot_gap=options['snapshot_gap'])
|
Make a context from the template information and variant args.
Currently, all arguments should be keyword arguments.
Args:
options (dict): Options to `wrap_experiment` itself. See the
function documentation for details.
kwargs (dict): Keyword arguments for the wrapped function. Will be
logged to `variant.json`
Returns:
ExperimentContext: The created experiment context.
|
_make_context
|
python
|
rlworkgroup/garage
|
src/garage/experiment/experiment.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/experiment.py
|
MIT
|
def __call__(self, *args, **kwargs):
"""Wrap a function to turn it into an ExperimentTemplate.
    Note that this docstring will be overridden to match the function's
docstring on the ExperimentTemplate once a function is passed in.
Args:
args (list): If no function has been set yet, must be a list
containing a single callable. If the function has been set, may
be a single value, a dictionary containing overrides for the
original arguments to `wrap_experiment`.
kwargs (dict): Arguments passed onto the wrapped function.
Returns:
object: The returned value of the wrapped function.
Raises:
ValueError: If not passed a single callable argument.
"""
if self.function is None:
if len(args) != 1 or len(kwargs) != 0 or not callable(args[0]):
raise ValueError('Please apply the result of '
'wrap_experiment() to a single function')
# Apply ourselves as a decorator
self.function = args[0]
self._update_wrap_params()
return self
else:
ctxt = self._make_context(self._get_options(*args), **kwargs)
result = self.function(ctxt, **kwargs)
logger.remove_all()
logger.pop_prefix()
gc.collect() # See dowel issue #44
return result
|
Wrap a function to turn it into an ExperimentTemplate.
    Note that this docstring will be overridden to match the function's
docstring on the ExperimentTemplate once a function is passed in.
Args:
args (list): If no function has been set yet, must be a list
containing a single callable. If the function has been set, may
be a single value, a dictionary containing overrides for the
original arguments to `wrap_experiment`.
kwargs (dict): Arguments passed onto the wrapped function.
Returns:
object: The returned value of the wrapped function.
Raises:
ValueError: If not passed a single callable argument.
|
__call__
|
python
|
rlworkgroup/garage
|
src/garage/experiment/experiment.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/experiment.py
|
MIT
|
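The __call__ above is what lets wrap_experiment act as a decorator: the first
call captures the function, and later calls build an ExperimentContext and run
it. A minimal usage sketch with a hypothetical experiment body:

from garage import wrap_experiment
from garage.experiment.deterministic import set_seed

@wrap_experiment(snapshot_mode='last')
def my_experiment(ctxt, seed=1):
    """Hypothetical experiment; ctxt is the ExperimentContext."""
    set_seed(seed)
    print('snapshots go to', ctxt.snapshot_dir)

my_experiment(seed=3)  # runs the wrapped function with a fresh log directory
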
def dump_json(filename, data):
"""Dump a dictionary to a file in JSON format.
Args:
filename(str): Filename for the file.
data(dict): Data to save to file.
"""
pathlib.Path(os.path.dirname(filename)).mkdir(parents=True, exist_ok=True)
with open(filename, 'w') as f:
# We do our own circular reference handling.
# Sometimes sort_keys fails because the keys don't get made into
# strings early enough.
json.dump(data,
f,
indent=2,
sort_keys=False,
cls=LogEncoder,
check_circular=False)
|
Dump a dictionary to a file in JSON format.
Args:
filename(str): Filename for the file.
data(dict): Data to save to file.
|
dump_json
|
python
|
rlworkgroup/garage
|
src/garage/experiment/experiment.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/experiment.py
|
MIT
|
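A quick usage sketch for dump_json; the target path is hypothetical, and the
LogEncoder referenced in the code means values that are not natively JSON
still get a best-effort encoding.

import os
import tempfile

from garage.experiment.experiment import dump_json

path = os.path.join(tempfile.mkdtemp(), 'variant.json')
dump_json(path, {'seed': 1, 'hidden_sizes': (32, 32), 'note': 'demo run'})
with open(path) as f:
    print(f.read())
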
def make_launcher_archive(*, git_root_path, log_dir):
"""Saves an archive of the launcher's git repo to the log directory.
Args:
git_root_path (str): Absolute path to git repo to archive.
log_dir (str): Absolute path to the log directory.
"""
git_files = subprocess.check_output(
('git', 'ls-files', '--others', '--exclude-standard', '--cached',
'-z'),
cwd=git_root_path).strip()
repo_size = 0
files_to_archive = []
for f in git_files.split(b'\0'):
try:
file_size = os.stat(os.path.join(git_root_path, f)).st_size
repo_size += file_size
if file_size < EIGHT_MEBIBYTES:
files_to_archive.append(f)
except FileNotFoundError:
pass
if repo_size >= EIGHT_MEBIBYTES:
warnings.warn('Archiving a launch repo larger than 8MiB. This may be '
'slow. Set archive_launch_repo=False in wrap_experiment '
'to disable this behavior.')
archive_path = os.path.join(log_dir, 'launch_archive.tar.xz')
subprocess.run(('tar', '--null', '--files-from', '-', '--xz', '--create',
'--file', archive_path),
input=b'\0'.join(files_to_archive),
cwd=git_root_path,
check=True)
|
Saves an archive of the launcher's git repo to the log directory.
Args:
git_root_path (str): Absolute path to git repo to archive.
log_dir (str): Absolute path to the log directory.
|
make_launcher_archive
|
python
|
rlworkgroup/garage
|
src/garage/experiment/experiment.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/experiment.py
|
MIT
|
def default(self, o):
"""Perform JSON encoding.
Args:
o (object): Object to encode.
Raises:
TypeError: If `o` cannot be turned into JSON even using `repr(o)`.
Returns:
dict or str or float or bool: Object encoded in JSON.
"""
# Why is this method hidden? What does that mean?
# pylint: disable=method-hidden
# pylint: disable=too-many-branches
# pylint: disable=too-many-return-statements
# This circular reference checking code was copied from the standard
# library json implementation, but it outputs a repr'd string instead
# of ValueError on a circular reference.
if isinstance(o, (int, bool, float, str)):
return o
else:
markerid = id(o)
if markerid in self._markers:
return 'circular ' + repr(o)
else:
self._markers[markerid] = o
try:
return self._default_inner(o)
finally:
del self._markers[markerid]
|
Perform JSON encoding.
Args:
o (object): Object to encode.
Raises:
TypeError: If `o` cannot be turned into JSON even using `repr(o)`.
Returns:
dict or str or float or bool: Object encoded in JSON.
|
default
|
python
|
rlworkgroup/garage
|
src/garage/experiment/experiment.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/experiment.py
|
MIT
|
def _default_inner(self, o):
"""Perform JSON encoding.
Args:
o (object): Object to encode.
Raises:
TypeError: If `o` cannot be turned into JSON even using `repr(o)`.
ValueError: If raised by calling repr on an object.
Returns:
dict or str or float or bool: Object encoded in JSON.
"""
# Why is this method hidden? What does that mean?
# pylint: disable=method-hidden
# pylint: disable=too-many-branches
# pylint: disable=too-many-return-statements
# This circular reference checking code was copied from the standard
# library json implementation, but it outputs a repr'd string instead
# of ValueError on a circular reference.
try:
return json.JSONEncoder.default(self, o)
except TypeError as err:
if isinstance(o, dict):
data = {}
for (k, v) in o.items():
if isinstance(k, str):
data[k] = self.default(v)
else:
data[repr(k)] = self.default(v)
return data
elif isinstance(o, weakref.ref):
return repr(o)
elif type(o).__module__.split('.')[0] in self.BLOCKED_MODULES:
return repr(o)
elif isinstance(o, type):
return {'$typename': o.__module__ + '.' + o.__name__}
elif isinstance(o, np.number):
# For some reason these aren't natively considered
# serializable.
# JSON doesn't actually have ints, so always use a float.
return float(o)
elif isinstance(o, np.bool8):
return bool(o)
elif isinstance(o, enum.Enum):
return {
'$enum':
o.__module__ + '.' + o.__class__.__name__ + '.' + o.name
}
elif isinstance(o, np.ndarray):
return repr(o)
elif hasattr(o, '__dict__') or hasattr(o, '__slots__'):
obj_dict = getattr(o, '__dict__', None)
if obj_dict is not None:
data = {k: self.default(v) for (k, v) in obj_dict.items()}
else:
data = {
s: self.default(getattr(o, s))
for s in o.__slots__
}
t = type(o)
data['$type'] = t.__module__ + '.' + t.__name__
return data
elif callable(o) and hasattr(o, '__name__'):
if getattr(o, '__module__', None) is not None:
return {'$function': o.__module__ + '.' + o.__name__}
else:
return repr(o)
else:
try:
# This case handles many built-in datatypes like deques
return [self.default(v) for v in list(o)]
except TypeError:
pass
try:
# This case handles most other weird objects.
return repr(o)
except TypeError:
pass
raise err
|
Perform JSON encoding.
Args:
o (object): Object to encode.
Raises:
TypeError: If `o` cannot be turned into JSON even using `repr(o)`.
ValueError: If raised by calling repr on an object.
Returns:
dict or str or float or bool: Object encoded in JSON.
|
_default_inner
|
python
|
rlworkgroup/garage
|
src/garage/experiment/experiment.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/experiment.py
|
MIT
|
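These two methods are what make LogEncoder usable directly with the standard
json module; a short sketch with arbitrary example objects (the Config class
is hypothetical):

import json

import numpy as np

from garage.experiment.experiment import LogEncoder

class Config:
    """Hypothetical object with a __dict__; it is encoded field by field."""

    def __init__(self):
        self.lr = np.float32(1e-3)       # numpy scalars are emitted as floats
        self.sizes = np.array([32, 32])  # ndarrays fall back to their repr

print(json.dumps({'config': Config()}, cls=LogEncoder, indent=2))
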
def evaluate(self, algo, test_episodes_per_task=None):
"""Evaluate the Meta-RL algorithm on the test tasks.
Args:
algo (MetaRLAlgorithm): The algorithm to evaluate.
test_episodes_per_task (int or None): Number of episodes per task.
"""
if test_episodes_per_task is None:
test_episodes_per_task = self._n_test_episodes
adapted_episodes = []
    logger.log('Sampling for adaptation and meta-testing...')
env_updates = self._test_task_sampler.sample(self._n_test_tasks)
if self._test_sampler is None:
env = env_updates[0]()
self._max_episode_length = env.spec.max_episode_length
self._test_sampler = LocalSampler.from_worker_factory(
WorkerFactory(seed=get_seed(),
max_episode_length=self._max_episode_length,
n_workers=1,
worker_class=self._worker_class,
worker_args=self._worker_args),
agents=algo.get_exploration_policy(),
envs=env)
for env_up in env_updates:
policy = algo.get_exploration_policy()
eps = EpisodeBatch.concatenate(*[
self._test_sampler.obtain_samples(self._eval_itr, 1, policy,
env_up)
for _ in range(self._n_exploration_eps)
])
adapted_policy = algo.adapt_policy(policy, eps)
adapted_eps = self._test_sampler.obtain_samples(
self._eval_itr,
test_episodes_per_task * self._max_episode_length,
adapted_policy)
adapted_episodes.append(adapted_eps)
logger.log('Finished meta-testing...')
if self._test_task_names is not None:
name_map = dict(enumerate(self._test_task_names))
else:
name_map = None
with tabular.prefix(self._prefix + '/' if self._prefix else ''):
log_multitask_performance(
self._eval_itr,
EpisodeBatch.concatenate(*adapted_episodes),
getattr(algo, 'discount', 1.0),
name_map=name_map)
self._eval_itr += 1
|
Evaluate the Meta-RL algorithm on the test tasks.
Args:
algo (MetaRLAlgorithm): The algorithm to evaluate.
test_episodes_per_task (int or None): Number of episodes per task.
|
evaluate
|
python
|
rlworkgroup/garage
|
src/garage/experiment/meta_evaluator.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/meta_evaluator.py
|
MIT
|
def save_itr_params(self, itr, params):
"""Save the parameters if at the right iteration.
Args:
itr (int): Number of iterations. Used as the index of snapshot.
params (obj): Content of snapshot to be saved.
Raises:
ValueError: If snapshot_mode is not one of "all", "last", "gap",
"gap_overwrite", "gap_and_last", or "none".
"""
file_name = None
if self._snapshot_mode == 'all':
file_name = os.path.join(self._snapshot_dir, 'itr_%d.pkl' % itr)
elif self._snapshot_mode == 'gap_overwrite':
if itr % self._snapshot_gap == 0:
file_name = os.path.join(self._snapshot_dir, 'params.pkl')
elif self._snapshot_mode == 'last':
# override previous params
file_name = os.path.join(self._snapshot_dir, 'params.pkl')
elif self._snapshot_mode == 'gap':
if itr % self._snapshot_gap == 0:
file_name = os.path.join(self._snapshot_dir,
'itr_%d.pkl' % itr)
elif self._snapshot_mode == 'gap_and_last':
if itr % self._snapshot_gap == 0:
file_name = os.path.join(self._snapshot_dir,
'itr_%d.pkl' % itr)
file_name_last = os.path.join(self._snapshot_dir, 'params.pkl')
with open(file_name_last, 'wb') as file:
cloudpickle.dump(params, file)
elif self._snapshot_mode == 'none':
pass
else:
raise ValueError('Invalid snapshot mode {}'.format(
self._snapshot_mode))
if file_name:
with open(file_name, 'wb') as file:
cloudpickle.dump(params, file)
|
Save the parameters if at the right iteration.
Args:
itr (int): Number of iterations. Used as the index of snapshot.
params (obj): Content of snapshot to be saved.
Raises:
ValueError: If snapshot_mode is not one of "all", "last", "gap",
"gap_overwrite", "gap_and_last", or "none".
|
save_itr_params
|
python
|
rlworkgroup/garage
|
src/garage/experiment/snapshotter.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/snapshotter.py
|
MIT
|
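A small sketch of the snapshot modes handled above, assuming the usual
Snapshotter(snapshot_dir, snapshot_mode, snapshot_gap) constructor and a
hypothetical output directory:

import os

from garage.experiment import Snapshotter

snapshot_dir = 'data/local/experiment/demo'  # hypothetical location
os.makedirs(snapshot_dir, exist_ok=True)
snapshotter = Snapshotter(snapshot_dir=snapshot_dir,
                          snapshot_mode='gap_and_last',
                          snapshot_gap=10)
for itr in range(25):
    params = {'itr': itr, 'weights': [0.0] * 4}  # stand-in payload
    snapshotter.save_itr_params(itr, params)
# Leaves itr_0.pkl, itr_10.pkl and itr_20.pkl plus an up-to-date params.pkl.
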
def load(self, load_dir, itr='last'):
# pylint: disable=no-self-use
"""Load one snapshot of parameters from disk.
Args:
load_dir (str): Directory of the cloudpickle file
to resume experiment from.
itr (int or string): Iteration to load.
Can be an integer, 'last' or 'first'.
Returns:
dict: Loaded snapshot.
Raises:
ValueError: If itr is neither an integer nor
one of ("last", "first").
FileNotFoundError: If the snapshot file is not found in load_dir.
NotAFileError: If the snapshot exists but is not a file.
"""
if isinstance(itr, int) or itr.isdigit():
load_from_file = os.path.join(load_dir, 'itr_{}.pkl'.format(itr))
else:
if itr not in ('last', 'first'):
raise ValueError(
"itr should be an integer or 'last' or 'first'")
load_from_file = os.path.join(load_dir, 'params.pkl')
if not os.path.isfile(load_from_file):
files = [f for f in os.listdir(load_dir) if f.endswith('.pkl')]
if not files:
raise FileNotFoundError(errno.ENOENT,
os.strerror(errno.ENOENT),
'*.pkl file in', load_dir)
files.sort(key=_extract_snapshot_itr)
load_from_file = files[0] if itr == 'first' else files[-1]
load_from_file = os.path.join(load_dir, load_from_file)
if not os.path.isfile(load_from_file):
raise NotAFileError('File not existing: ', load_from_file)
with open(load_from_file, 'rb') as file:
return cloudpickle.load(file)
|
Load one snapshot of parameters from disk.
Args:
load_dir (str): Directory of the cloudpickle file
to resume experiment from.
itr (int or string): Iteration to load.
Can be an integer, 'last' or 'first'.
Returns:
dict: Loaded snapshot.
Raises:
ValueError: If itr is neither an integer nor
one of ("last", "first").
FileNotFoundError: If the snapshot file is not found in load_dir.
NotAFileError: If the snapshot exists but is not a file.
|
load
|
python
|
rlworkgroup/garage
|
src/garage/experiment/snapshotter.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/snapshotter.py
|
MIT
|
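A short resume sketch using load; the directory is hypothetical and must
already contain snapshots written by a previous run.

from garage.experiment import Snapshotter

snapshotter = Snapshotter()
# 'last' reads params.pkl; an integer itr reads the matching itr_<n>.pkl.
data = snapshotter.load('data/local/experiment/demo', itr='last')
print(sorted(data.keys()))  # typically includes 'algo' and 'env'
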
def _extract_snapshot_itr(filename: str) -> int:
"""Extracts the integer itr from a filename.
Args:
filename(str): The snapshot filename.
Returns:
int: The snapshot as an integer.
"""
base = os.path.splitext(filename)[0]
digits = base.split('itr_')[1]
return int(digits)
|
Extracts the integer itr from a filename.
Args:
filename(str): The snapshot filename.
Returns:
int: The snapshot as an integer.
|
_extract_snapshot_itr
|
python
|
rlworkgroup/garage
|
src/garage/experiment/snapshotter.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/snapshotter.py
|
MIT
|
def _sample_indices(n_to_sample, n_available_tasks, with_replacement):
"""Select indices of tasks to sample.
Args:
n_to_sample (int): Number of environments to sample. May be greater
than n_available_tasks.
n_available_tasks (int): Number of available tasks. Task indices will
be selected in the range [0, n_available_tasks).
with_replacement (bool): Whether tasks can repeat when sampled.
Note that if more tasks are sampled than exist, then tasks may
repeat, but only after every environment has been included at
least once in this batch. Ignored for continuous task spaces.
Returns:
np.ndarray[int]: Array of task indices.
"""
if with_replacement:
return np.random.randint(n_available_tasks, size=n_to_sample)
else:
blocks = []
for _ in range(math.ceil(n_to_sample / n_available_tasks)):
s = np.arange(n_available_tasks)
np.random.shuffle(s)
blocks.append(s)
return np.concatenate(blocks)[:n_to_sample]
|
Select indices of tasks to sample.
Args:
n_to_sample (int): Number of environments to sample. May be greater
than n_available_tasks.
n_available_tasks (int): Number of available tasks. Task indices will
be selected in the range [0, n_available_tasks).
with_replacement (bool): Whether tasks can repeat when sampled.
Note that if more tasks are sampled than exist, then tasks may
repeat, but only after every environment has been included at
least once in this batch. Ignored for continuous task spaces.
Returns:
np.ndarray[int]: Array of task indices.
|
_sample_indices
|
python
|
rlworkgroup/garage
|
src/garage/experiment/task_sampler.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/task_sampler.py
|
MIT
|
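A behavioral sketch of _sample_indices; it is a private helper, so the import
is for illustration only. Without replacement, every task index appears once
per shuffled block before any index repeats.

import numpy as np

from garage.experiment.task_sampler import _sample_indices

np.random.seed(0)
# 7 draws from 3 tasks: shuffled blocks of [0, 1, 2], truncated to 7 entries.
print(_sample_indices(7, 3, with_replacement=False))
# With replacement, draws are independent and may repeat immediately.
print(_sample_indices(7, 3, with_replacement=True))
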
def sample(self, n_tasks, with_replacement=False):
"""Sample a list of environment updates.
Args:
n_tasks (int): Number of updates to sample.
with_replacement (bool): Whether tasks can repeat when sampled.
Note that if more tasks are sampled than exist, then tasks may
repeat, but only after every environment has been included at
least once in this batch. Ignored for continuous task spaces.
Returns:
list[EnvUpdate]: Batch of sampled environment updates, which, when
invoked on environments, will configure them with new tasks.
See :py:class:`~EnvUpdate` for more information.
"""
|
Sample a list of environment updates.
Args:
n_tasks (int): Number of updates to sample.
with_replacement (bool): Whether tasks can repeat when sampled.
Note that if more tasks are sampled than exist, then tasks may
repeat, but only after every environment has been included at
least once in this batch. Ignored for continuous task spaces.
Returns:
list[EnvUpdate]: Batch of sampled environment updates, which, when
invoked on environments, will configure them with new tasks.
See :py:class:`~EnvUpdate` for more information.
|
sample
|
python
|
rlworkgroup/garage
|
src/garage/experiment/task_sampler.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/task_sampler.py
|
MIT
|
def sample(self, n_tasks, with_replacement=False):
"""Sample a list of environment updates.
Args:
n_tasks (int): Number of updates to sample.
with_replacement (bool): Whether tasks can repeat when sampled.
Note that if more tasks are sampled than exist, then tasks may
repeat, but only after every environment has been included at
least once in this batch. Ignored for continuous task spaces.
Returns:
list[EnvUpdate]: Batch of sampled environment updates, which, when
invoked on environments, will configure them with new tasks.
See :py:class:`~EnvUpdate` for more information.
"""
return [
NewEnvUpdate(self._env_constructors[i]) for i in _sample_indices(
n_tasks, len(self._env_constructors), with_replacement)
]
|
Sample a list of environment updates.
Args:
n_tasks (int): Number of updates to sample.
with_replacement (bool): Whether tasks can repeat when sampled.
Note that if more tasks are sampled than exist, then tasks may
repeat, but only after every environment has been included at
least once in this batch. Ignored for continuous task spaces.
Returns:
list[EnvUpdate]: Batch of sampled environment updates, which, when
invoked on environments, will configure them with new tasks.
See :py:class:`~EnvUpdate` for more information.
|
sample
|
python
|
rlworkgroup/garage
|
src/garage/experiment/task_sampler.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/task_sampler.py
|
MIT
|
def sample(self, n_tasks, with_replacement=False):
"""Sample a list of environment updates.
Args:
n_tasks (int): Number of updates to sample.
with_replacement (bool): Whether tasks can repeat when sampled.
Note that if more tasks are sampled than exist, then tasks may
repeat, but only after every environment has been included at
least once in this batch. Ignored for continuous task spaces.
Returns:
list[EnvUpdate]: Batch of sampled environment updates, which, when
invoked on environments, will configure them with new tasks.
See :py:class:`~EnvUpdate` for more information.
"""
return [
SetTaskUpdate(self._env_constructor, task, self._wrapper)
for task in self._env.sample_tasks(n_tasks)
]
|
Sample a list of environment updates.
Args:
n_tasks (int): Number of updates to sample.
with_replacement (bool): Whether tasks can repeat when sampled.
Note that if more tasks are sampled than exist, then tasks may
repeat, but only after every environment has been included at
least once in this batch. Ignored for continuous task spaces.
Returns:
list[EnvUpdate]: Batch of sampled environment updates, which, when
invoked on environments, will configure them with new tasks.
See :py:class:`~EnvUpdate` for more information.
|
sample
|
python
|
rlworkgroup/garage
|
src/garage/experiment/task_sampler.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/task_sampler.py
|
MIT
|
def sample(self, n_tasks, with_replacement=False):
"""Sample a list of environment updates.
Args:
n_tasks (int): Number of updates to sample.
with_replacement (bool): Whether tasks can repeat when sampled.
Since this cannot be easily implemented for an object pool,
setting this to True results in ValueError.
Raises:
ValueError: If the number of requested tasks is larger than the
pool, or with_replacement is set.
Returns:
list[EnvUpdate]: Batch of sampled environment updates, which, when
invoked on environments, will configure them with new tasks.
See :py:class:`~EnvUpdate` for more information.
"""
if n_tasks > len(self._envs):
raise ValueError('Cannot sample more environments than are '
'present in the pool. If more tasks are needed, '
'call grow_pool to copy random existing tasks.')
if with_replacement:
raise ValueError('EnvPoolSampler cannot meaningfully sample with '
'replacement.')
envs = list(self._envs)
np.random.shuffle(envs)
return [ExistingEnvUpdate(env) for env in envs[:n_tasks]]
|
Sample a list of environment updates.
Args:
n_tasks (int): Number of updates to sample.
with_replacement (bool): Whether tasks can repeat when sampled.
Since this cannot be easily implemented for an object pool,
setting this to True results in ValueError.
Raises:
ValueError: If the number of requested tasks is larger than the
pool, or with_replacement is set.
Returns:
list[EnvUpdate]: Batch of sampled environment updates, which, when
invoked on environments, will configure them with new tasks.
See :py:class:`~EnvUpdate` for more information.
|
sample
|
python
|
rlworkgroup/garage
|
src/garage/experiment/task_sampler.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/task_sampler.py
|
MIT
|
def grow_pool(self, new_size):
"""Increase the size of the pool by copying random tasks in it.
Note that this only copies the tasks already in the pool, and cannot
create new original tasks in any way.
Args:
        new_size (int): Size the pool should be after growing.
"""
if new_size <= len(self._envs):
return
to_copy = _sample_indices(new_size - len(self._envs),
len(self._envs),
with_replacement=False)
for idx in to_copy:
self._envs.append(copy.deepcopy(self._envs[idx]))
|
Increase the size of the pool by copying random tasks in it.
Note that this only copies the tasks already in the pool, and cannot
create new original tasks in any way.
Args:
        new_size (int): Size the pool should be after growing.
|
grow_pool
|
python
|
rlworkgroup/garage
|
src/garage/experiment/task_sampler.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/task_sampler.py
|
MIT
|
def sample(self, n_tasks, with_replacement=False):
"""Sample a list of environment updates.
Note that this will always return environments in the same order, to
make parallel sampling across workers efficient. If randomizing the
environment order is required, shuffle the result of this method.
Args:
n_tasks (int): Number of updates to sample. Must be a multiple of
the number of env classes in the benchmark (e.g. 1 for MT/ML1,
10 for MT10, 50 for MT50). Tasks for each environment will be
grouped to be adjacent to each other.
        with_replacement (bool): Whether tasks can repeat when sampled.
            If True, the task order within each environment is drawn at
            random; otherwise tasks are returned in a fixed, shuffled order.
    Raises:
        ValueError: If n_tasks is not a multiple of the number of env
            classes in the benchmark.
Returns:
list[EnvUpdate]: Batch of sampled environment updates, which, when
invoked on environments, will configure them with new tasks.
See :py:class:`~EnvUpdate` for more information.
"""
if n_tasks % len(self._classes) != 0:
raise ValueError('For this benchmark, n_tasks must be a multiple '
f'of {len(self._classes)}')
tasks_per_class = n_tasks // len(self._classes)
updates = []
# Avoid pickling the entire task sampler into every EnvUpdate
inner_wrapper = self._inner_wrapper
add_env_onehot = self._add_env_onehot
task_indices = self._task_indices
def wrap(env, task):
"""Wrap an environment in a metaworld benchmark.
Args:
env (gym.Env): A metaworld / gym environment.
task (metaworld.Task): A metaworld task.
Returns:
garage.Env: The wrapped environment.
"""
env = GymEnv(env, max_episode_length=env.max_path_length)
env = TaskNameWrapper(env, task_name=task.env_name)
if add_env_onehot:
env = TaskOnehotWrapper(env,
task_index=task_indices[task.env_name],
n_total_tasks=len(task_indices))
if inner_wrapper is not None:
env = inner_wrapper(env, task)
return env
for env_name, env in self._classes.items():
order_index = self._next_order_index
for _ in range(tasks_per_class):
task_index = self._task_orders[env_name][order_index]
task = self._task_map[env_name][task_index]
updates.append(SetTaskUpdate(env, task, wrap))
if with_replacement:
order_index = np.random.randint(0, MW_TASKS_PER_ENV)
else:
order_index += 1
order_index %= MW_TASKS_PER_ENV
self._next_order_index += tasks_per_class
if self._next_order_index >= MW_TASKS_PER_ENV:
self._next_order_index %= MW_TASKS_PER_ENV
self._shuffle_tasks()
return updates
|
Sample a list of environment updates.
Note that this will always return environments in the same order, to
make parallel sampling across workers efficient. If randomizing the
environment order is required, shuffle the result of this method.
Args:
n_tasks (int): Number of updates to sample. Must be a multiple of
the number of env classes in the benchmark (e.g. 1 for MT/ML1,
10 for MT10, 50 for MT50). Tasks for each environment will be
grouped to be adjacent to each other.
        with_replacement (bool): Whether tasks can repeat when sampled.
            If True, the task order within each environment is drawn at
            random; otherwise tasks are returned in a fixed, shuffled order.
    Raises:
        ValueError: If n_tasks is not a multiple of the number of env
            classes in the benchmark.
Returns:
list[EnvUpdate]: Batch of sampled environment updates, which, when
invoked on environments, will configure them with new tasks.
See :py:class:`~EnvUpdate` for more information.
|
sample
|
python
|
rlworkgroup/garage
|
src/garage/experiment/task_sampler.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/task_sampler.py
|
MIT
|
def wrap(env, task):
"""Wrap an environment in a metaworld benchmark.
Args:
env (gym.Env): A metaworld / gym environment.
task (metaworld.Task): A metaworld task.
Returns:
garage.Env: The wrapped environment.
"""
env = GymEnv(env, max_episode_length=env.max_path_length)
env = TaskNameWrapper(env, task_name=task.env_name)
if add_env_onehot:
env = TaskOnehotWrapper(env,
task_index=task_indices[task.env_name],
n_total_tasks=len(task_indices))
if inner_wrapper is not None:
env = inner_wrapper(env, task)
return env
|
Wrap an environment in a metaworld benchmark.
Args:
env (gym.Env): A metaworld / gym environment.
task (metaworld.Task): A metaworld task.
Returns:
garage.Env: The wrapped environment.
|
wrap
|
python
|
rlworkgroup/garage
|
src/garage/experiment/task_sampler.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/experiment/task_sampler.py
|
MIT
|
def explained_variance_1d(ypred, y, valids=None):
"""Explained variation for 1D inputs.
It is the proportion of the variance in one variable that is explained or
predicted from another variable.
Args:
ypred (np.ndarray): Sample data from the first variable.
Shape: :math:`(N, max_episode_length)`.
y (np.ndarray): Sample data from the second variable.
Shape: :math:`(N, max_episode_length)`.
valids (np.ndarray): Optional argument. Array indicating valid indices.
If None, it assumes the entire input array are valid.
Shape: :math:`(N, max_episode_length)`.
Returns:
float: The explained variance.
"""
if valids is not None:
        # Use the builtin bool: np.bool is removed in recent NumPy releases.
        ypred = ypred[valids.astype(bool)]
        y = y[valids.astype(bool)]
assert y.ndim == 1 and ypred.ndim == 1
vary = np.var(y)
if np.isclose(vary, 0):
if np.var(ypred) > 0:
return 0
return 1
return 1 - np.var(y - ypred) / (vary + 1e-8)
|
Explained variation for 1D inputs.
It is the proportion of the variance in one variable that is explained or
predicted from another variable.
Args:
ypred (np.ndarray): Sample data from the first variable.
Shape: :math:`(N, max_episode_length)`.
y (np.ndarray): Sample data from the second variable.
Shape: :math:`(N, max_episode_length)`.
valids (np.ndarray): Optional argument. Array indicating valid indices.
If None, it assumes the entire input array are valid.
Shape: :math:`(N, max_episode_length)`.
Returns:
float: The explained variance.
|
explained_variance_1d
|
python
|
rlworkgroup/garage
|
src/garage/np/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/_functions.py
|
MIT
|
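A small numeric sketch of explained_variance_1d; the import assumes the helper
is exported from garage.np, as the flatten_tensors docstring further below
suggests for this module.

import numpy as np

from garage.np import explained_variance_1d

y = np.array([1.0, 2.0, 3.0, 4.0])
perfect = y.copy()
noisy = y + np.array([0.5, -0.5, 0.5, -0.5])

print(explained_variance_1d(perfect, y))  # ~1.0: ypred explains y completely
print(explained_variance_1d(noisy, y))    # ~0.8: residual variance remains
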
def rrse(actual, predicted):
"""Root Relative Squared Error.
Args:
actual (np.ndarray): The actual value.
predicted (np.ndarray): The predicted value.
Returns:
float: The root relative square error between the actual and the
predicted value.
"""
return np.sqrt(
np.sum(np.square(actual - predicted)) /
np.sum(np.square(actual - np.mean(actual))))
|
Root Relative Squared Error.
Args:
actual (np.ndarray): The actual value.
predicted (np.ndarray): The predicted value.
Returns:
float: The root relative square error between the actual and the
predicted value.
|
rrse
|
python
|
rlworkgroup/garage
|
src/garage/np/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/_functions.py
|
MIT
|
def sliding_window(t, window, smear=False):
"""Create a sliding window over a tensor.
Args:
t (np.ndarray): A tensor to create sliding window from,
with shape :math:`(N, D)`, where N is the length of a trajectory,
D is the dimension of each step in trajectory.
        window (int): Window size, must be less than N.
smear (bool): If true, copy the last window so that N windows are
generated.
Returns:
        np.ndarray: All windows generated over t, with shape :math:`(M, W, D)`,
            where W is the window size. If smear is False, M is :math:`N-W+1`,
otherwise M is N.
Raises:
NotImplementedError: If step_size is not 1.
ValueError: If window size is larger than the input tensor.
"""
if window > t.shape[0]:
raise ValueError('`window` must be <= `t.shape[0]`')
if window == t.shape[0]:
return np.stack([t] * window)
# The stride trick works only on the last dimension of an ndarray, so we
# operate on the transpose, which reverses the dimensions of t.
t_T = t.T
shape = t_T.shape[:-1] + (t_T.shape[-1] - window, window)
strides = t_T.strides + (t_T.strides[-1], )
t_T_win = np.lib.stride_tricks.as_strided(t_T,
shape=shape,
strides=strides)
# t_T_win has shape (d_k, d_k-1, ..., (n - window_size), window_size)
# To arrive at the final shape, we first transpose the result to arrive at
    # (window_size, (n - window_size), d_1, ..., d_k), then swap the first two
# axes
t_win = np.swapaxes(t_T_win.T, 0, 1)
# Optionally smear the last element to preserve the first dimension
if smear:
t_win = pad_tensor(t_win, t.shape[0], mode='last')
return t_win
|
Create a sliding window over a tensor.
Args:
t (np.ndarray): A tensor to create sliding window from,
with shape :math:`(N, D)`, where N is the length of a trajectory,
D is the dimension of each step in trajectory.
        window (int): Window size, must be less than N.
smear (bool): If true, copy the last window so that N windows are
generated.
Returns:
        np.ndarray: All windows generated over t, with shape :math:`(M, W, D)`,
            where W is the window size. If smear is False, M is :math:`N-W+1`,
otherwise M is N.
Raises:
NotImplementedError: If step_size is not 1.
ValueError: If window size is larger than the input tensor.
|
sliding_window
|
python
|
rlworkgroup/garage
|
src/garage/np/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/_functions.py
|
MIT
|
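A shape check for sliding_window on a short trajectory; the import assumes the
helper is exported from garage.np alongside the other array utilities in this
file.

import numpy as np

from garage.np import sliding_window

t = np.arange(12).reshape(6, 2)  # trajectory with N=6 steps, D=2 dimensions
windows = sliding_window(t, window=3)
print(windows.shape)             # (M, W, D) with W=3
print(sliding_window(t, window=3, smear=True).shape)  # smear pads M up to N
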
def flatten_tensors(tensors):
"""Flatten a list of tensors.
Args:
tensors (list[numpy.ndarray]): List of tensors to be flattened.
Returns:
numpy.ndarray: Flattened tensors.
Example:
.. testsetup::
from garage.np import flatten_tensors
>>> flatten_tensors([np.ndarray([1]), np.ndarray([1])])
array(...)
"""
if tensors:
return np.concatenate([np.reshape(x, [-1]) for x in tensors])
return np.asarray([])
|
Flatten a list of tensors.
Args:
tensors (list[numpy.ndarray]): List of tensors to be flattened.
Returns:
numpy.ndarray: Flattened tensors.
Example:
.. testsetup::
from garage.np import flatten_tensors
>>> flatten_tensors([np.ndarray([1]), np.ndarray([1])])
array(...)
|
flatten_tensors
|
python
|
rlworkgroup/garage
|
src/garage/np/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/_functions.py
|
MIT
|
def unflatten_tensors(flattened, tensor_shapes):
"""Unflatten a flattened tensors into a list of tensors.
Args:
flattened (numpy.ndarray): Flattened tensors.
tensor_shapes (tuple): Tensor shapes.
Returns:
list[numpy.ndarray]: Unflattened list of tensors.
"""
tensor_sizes = list(map(np.prod, tensor_shapes))
indices = np.cumsum(tensor_sizes)[:-1]
return [
np.reshape(pair[0], pair[1])
for pair in zip(np.split(flattened, indices), tensor_shapes)
]
|
Unflatten a flattened tensors into a list of tensors.
Args:
flattened (numpy.ndarray): Flattened tensors.
tensor_shapes (tuple): Tensor shapes.
Returns:
list[numpy.ndarray]: Unflattened list of tensors.
|
unflatten_tensors
|
python
|
rlworkgroup/garage
|
src/garage/np/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/_functions.py
|
MIT
|
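A round-trip sketch combining flatten_tensors (above) and unflatten_tensors;
both imports follow the `from garage.np import ...` convention shown in the
flatten_tensors docstring.

import numpy as np

from garage.np import flatten_tensors, unflatten_tensors

weights = [np.ones((2, 3)), np.zeros(4)]
flat = flatten_tensors(weights)             # shape (10,)
shapes = [w.shape for w in weights]
restored = unflatten_tensors(flat, shapes)  # a (2, 3) array and a (4,) array
print(all(np.array_equal(a, b) for a, b in zip(weights, restored)))  # True
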
def pad_tensor(x, max_len, mode='zero'):
"""Pad tensors.
Args:
x (numpy.ndarray): Tensors to be padded.
max_len (int): Maximum length.
mode (str): If 'last', pad with the last element, otherwise pad with 0.
Returns:
numpy.ndarray: Padded tensor.
"""
padding = np.zeros_like(x[0])
if mode == 'last':
padding = x[-1]
return np.concatenate(
[x, np.tile(padding, (max_len - len(x), ) + (1, ) * np.ndim(x[0]))])
|
Pad tensors.
Args:
x (numpy.ndarray): Tensors to be padded.
max_len (int): Maximum length.
mode (str): If 'last', pad with the last element, otherwise pad with 0.
Returns:
numpy.ndarray: Padded tensor.
|
pad_tensor
|
python
|
rlworkgroup/garage
|
src/garage/np/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/_functions.py
|
MIT
|
def pad_tensor_n(xs, max_len):
"""Pad array of tensors.
Args:
xs (numpy.ndarray): Tensors to be padded.
max_len (int): Maximum length.
Returns:
numpy.ndarray: Padded tensor.
"""
ret = np.zeros((len(xs), max_len) + xs[0].shape[1:], dtype=xs[0].dtype)
for idx, x in enumerate(xs):
ret[idx][:len(x)] = x
return ret
|
Pad array of tensors.
Args:
xs (numpy.ndarray): Tensors to be padded.
max_len (int): Maximum length.
Returns:
numpy.ndarray: Padded tensor.
|
pad_tensor_n
|
python
|
rlworkgroup/garage
|
src/garage/np/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/_functions.py
|
MIT
|
def pad_tensor_dict(tensor_dict, max_len, mode='zero'):
"""Pad dictionary of tensors.
Args:
tensor_dict (dict[numpy.ndarray]): Tensors to be padded.
max_len (int): Maximum length.
mode (str): If 'last', pad with the last element, otherwise pad with 0.
Returns:
dict[numpy.ndarray]: Padded tensor.
"""
keys = list(tensor_dict.keys())
ret = dict()
for k in keys:
if isinstance(tensor_dict[k], dict):
ret[k] = pad_tensor_dict(tensor_dict[k], max_len, mode=mode)
else:
ret[k] = pad_tensor(tensor_dict[k], max_len, mode=mode)
return ret
|
Pad dictionary of tensors.
Args:
tensor_dict (dict[numpy.ndarray]): Tensors to be padded.
max_len (int): Maximum length.
mode (str): If 'last', pad with the last element, otherwise pad with 0.
Returns:
dict[numpy.ndarray]: Padded tensor.
|
pad_tensor_dict
|
python
|
rlworkgroup/garage
|
src/garage/np/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/_functions.py
|
MIT
|
def stack_tensor_dict_list(tensor_dict_list):
"""Stack a list of dictionaries of {tensors or dictionary of tensors}.
Args:
tensor_dict_list (dict[list]): a list of dictionaries of {tensors or
dictionary of tensors}.
Return:
dict: a dictionary of {stacked tensors or dictionary of
stacked tensors}
"""
keys = list(tensor_dict_list[0].keys())
ret = dict()
for k in keys:
example = tensor_dict_list[0][k]
dict_list = [x[k] if k in x else [] for x in tensor_dict_list]
if isinstance(example, dict):
v = stack_tensor_dict_list(dict_list)
else:
v = np.array(dict_list)
ret[k] = v
return ret
|
Stack a list of dictionaries of {tensors or dictionary of tensors}.
Args:
tensor_dict_list (dict[list]): a list of dictionaries of {tensors or
dictionary of tensors}.
Return:
dict: a dictionary of {stacked tensors or dictionary of
stacked tensors}
|
stack_tensor_dict_list
|
python
|
rlworkgroup/garage
|
src/garage/np/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/_functions.py
|
MIT
|
def stack_and_pad_tensor_dict_list(tensor_dict_list, max_len):
"""Stack and pad array of list of tensors.
Input paths are a list of N dicts, each with values of shape
    :math:`(D, S^*)`. This function stacks and pads the values of each input
    key to max_len, so the output will have shape :math:`(N, D, S^*)`.
Args:
tensor_dict_list (list[dict]): List of dict to be stacked and padded.
Value of each dict will be shape of :math:`(D, S^*)`.
max_len (int): Maximum length for padding.
Returns:
dict: a dictionary of {stacked tensors or dictionary of
stacked tensors}. Shape: :math:`(N, D, S^*)`
where N is the len of input paths.
"""
keys = list(tensor_dict_list[0].keys())
ret = dict()
for k in keys:
example = tensor_dict_list[0][k]
dict_list = [x[k] if k in x else [] for x in tensor_dict_list]
if isinstance(example, dict):
v = stack_and_pad_tensor_dict_list(dict_list, max_len)
else:
v = pad_tensor_n(np.array(dict_list), max_len)
ret[k] = v
return ret
|
Stack and pad array of list of tensors.
Input paths are a list of N dicts, each with values of shape
    :math:`(D, S^*)`. This function stacks and pads the values of each input
    key to max_len, so the output will have shape :math:`(N, D, S^*)`.
Args:
tensor_dict_list (list[dict]): List of dict to be stacked and padded.
Value of each dict will be shape of :math:`(D, S^*)`.
max_len (int): Maximum length for padding.
Returns:
dict: a dictionary of {stacked tensors or dictionary of
stacked tensors}. Shape: :math:`(N, D, S^*)`
where N is the len of input paths.
|
stack_and_pad_tensor_dict_list
|
python
|
rlworkgroup/garage
|
src/garage/np/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/_functions.py
|
MIT
|
def concat_tensor_dict_list(tensor_dict_list):
"""Concatenate dictionary of list of tensor.
Args:
tensor_dict_list (dict[list]): a list of dictionaries of {tensors or
dictionary of tensors}.
Return:
dict: a dictionary of {stacked tensors or dictionary of
stacked tensors}
"""
keys = list(tensor_dict_list[0].keys())
ret = dict()
for k in keys:
example = tensor_dict_list[0][k]
dict_list = [x[k] if k in x else [] for x in tensor_dict_list]
if isinstance(example, dict):
v = concat_tensor_dict_list(dict_list)
else:
v = np.concatenate(dict_list, axis=0)
ret[k] = v
return ret
|
Concatenate dictionary of list of tensor.
Args:
tensor_dict_list (dict[list]): a list of dictionaries of {tensors or
dictionary of tensors}.
Return:
dict: a dictionary of {stacked tensors or dictionary of
stacked tensors}
|
concat_tensor_dict_list
|
python
|
rlworkgroup/garage
|
src/garage/np/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/_functions.py
|
MIT
|
def truncate_tensor_dict(tensor_dict, truncated_len):
"""Truncate dictionary of list of tensor.
Args:
tensor_dict (dict[numpy.ndarray]): a dictionary of {tensors or
dictionary of tensors}.
truncated_len (int): Length to truncate.
Return:
dict: a dictionary of {stacked tensors or dictionary of
stacked tensors}
"""
ret = dict()
for k, v in tensor_dict.items():
if isinstance(v, dict):
ret[k] = truncate_tensor_dict(v, truncated_len)
else:
ret[k] = v[:truncated_len]
return ret
|
Truncate dictionary of list of tensor.
Args:
tensor_dict (dict[numpy.ndarray]): a dictionary of {tensors or
dictionary of tensors}.
truncated_len (int): Length to truncate.
Return:
dict: a dictionary of {stacked tensors or dictionary of
stacked tensors}
|
truncate_tensor_dict
|
python
|
rlworkgroup/garage
|
src/garage/np/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/_functions.py
|
MIT
|
def slice_nested_dict(dict_or_array, start, stop):
"""Slice a dictionary containing arrays (or dictionaries).
This function is primarily intended for un-batching env_infos and
action_infos.
Args:
dict_or_array (dict[str, dict or np.ndarray] or np.ndarray): A nested
dictionary should only contain dictionaries and numpy arrays
(recursively).
start (int): First index to be included in the slice.
stop (int): First index to be excluded from the slice. In other words,
these are typical python slice indices.
Returns:
dict or np.ndarray: The input, but sliced.
"""
if isinstance(dict_or_array, dict):
return {
k: slice_nested_dict(v, start, stop)
for (k, v) in dict_or_array.items()
}
else:
# It *should* be a numpy array (unless someone ignored the type
# signature).
return dict_or_array[start:stop]
|
Slice a dictionary containing arrays (or dictionaries).
This function is primarily intended for un-batching env_infos and
action_infos.
Args:
dict_or_array (dict[str, dict or np.ndarray] or np.ndarray): A nested
dictionary should only contain dictionaries and numpy arrays
(recursively).
start (int): First index to be included in the slice.
stop (int): First index to be excluded from the slice. In other words,
these are typical python slice indices.
Returns:
dict or np.ndarray: The input, but sliced.
|
slice_nested_dict
|
python
|
rlworkgroup/garage
|
src/garage/np/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/_functions.py
|
MIT
|
def pad_batch_array(array, lengths, max_length=None):
r"""Convert a packed into a padded array with one more dimension.
Args:
array (np.ndarray): Array of length :math:`(N \bullet [T], X^*)`
lengths (list[int]): List of length :math:`N` containing the length
of each episode in the batch array.
max_length (int): Defaults to max(lengths) if not provided.
Returns:
np.ndarray: Of shape :math:`(N, max_length, X^*)`
"""
assert array.shape[0] == sum(lengths)
if max_length is None:
max_length = max(lengths)
elif max_length < max(lengths):
        # We have at least one episode longer than max_length (which is
# usually max_episode_length).
# This is probably not a good idea to allow, but RL2 already uses it.
warnings.warn('Creating a padded array with longer length than '
'requested')
max_length = max(lengths)
padded = np.zeros((len(lengths), max_length) + array.shape[1:],
dtype=array.dtype)
start = 0
for i, length in enumerate(lengths):
stop = start + length
padded[i][0:length] = array[start:stop]
start = stop
return padded
|
Convert a packed array into a padded array with one more dimension.
Args:
array (np.ndarray): Array of length :math:`(N \bullet [T], X^*)`
lengths (list[int]): List of length :math:`N` containing the length
of each episode in the batch array.
max_length (int): Defaults to max(lengths) if not provided.
Returns:
np.ndarray: Of shape :math:`(N, max_length, X^*)`
|
pad_batch_array
|
python
|
rlworkgroup/garage
|
src/garage/np/_functions.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/_functions.py
|
MIT
|
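A small sketch of pad_batch_array turning a packed per-step array into a
padded per-episode array; the import again assumes the garage.np export.

import numpy as np

from garage.np import pad_batch_array

rewards = np.array([1., 1., 1., 2., 2.])  # two episodes packed back to back
lengths = [3, 2]                          # per-episode lengths, summing to 5
padded = pad_batch_array(rewards, lengths)
print(padded)
# [[1. 1. 1.]
#  [2. 2. 0.]]
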
def _sample_params(self, epoch):
"""Return sample parameters.
Args:
epoch (int): Epoch number.
Returns:
np.ndarray: A numpy array of parameter values.
"""
extra_var_mult = max(1.0 - epoch / self._extra_decay_time, 0)
sample_std = np.sqrt(
np.square(self._cur_std) +
np.square(self._extra_std) * extra_var_mult)
return np.random.standard_normal(
self._n_params) * sample_std + self._cur_mean
|
Return sample parameters.
Args:
epoch (int): Epoch number.
Returns:
np.ndarray: A numpy array of parameter values.
|
_sample_params
|
python
|
rlworkgroup/garage
|
src/garage/np/algos/cem.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/algos/cem.py
|
MIT
|
def train(self, trainer):
"""Initialize variables and start training.
Args:
trainer (Trainer): Experiment trainer, which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
# epoch-wise
self._cur_std = self._init_std
self._cur_mean = self.policy.get_param_values()
# epoch-cycle-wise
self._cur_params = self._cur_mean
self._all_returns = []
self._all_params = [self._cur_mean.copy()]
# constant
self._n_best = int(self._n_samples * self._best_frac)
assert self._n_best >= 1, (
'n_samples is too low. Make sure that n_samples * best_frac >= 1')
self._n_params = len(self._cur_mean)
# start actual training
last_return = None
for _ in trainer.step_epochs():
for _ in range(self._n_samples):
trainer.step_episode = trainer.obtain_episodes(
trainer.step_itr)
last_return = self._train_once(trainer.step_itr,
trainer.step_episode)
trainer.step_itr += 1
return last_return
|
Initialize variables and start training.
Args:
trainer (Trainer): Experiment trainer, which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
|
train
|
python
|
rlworkgroup/garage
|
src/garage/np/algos/cem.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/algos/cem.py
|
MIT
|
def _train_once(self, itr, episodes):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
episodes (garage.EpisodeBatch): Episodes collected using the
current policy.
Returns:
float: The average return of epoch cycle.
"""
# -- Stage: Run and calculate performance of the algorithm
undiscounted_returns = log_performance(itr,
episodes,
discount=self._discount)
self._episode_reward_mean.extend(undiscounted_returns)
tabular.record('Extras/EpisodeRewardMean',
np.mean(self._episode_reward_mean))
average_return = np.mean(undiscounted_returns)
epoch = itr // self._n_samples
i_sample = itr - epoch * self._n_samples
tabular.record('Epoch', epoch)
tabular.record('# Sample', i_sample)
rtn = average_return
self._all_returns.append(average_return)
# -- Stage: Update policy distribution.
if (itr + 1) % self._n_samples == 0:
avg_rtns = np.array(self._all_returns)
best_inds = np.argsort(-avg_rtns)[:self._n_best]
best_params = np.array(self._all_params)[best_inds]
# MLE of normal distribution
self._cur_mean = best_params.mean(axis=0)
self._cur_std = best_params.std(axis=0)
self.policy.set_param_values(self._cur_mean)
# Clear for next epoch
rtn = max(self._all_returns)
self._all_returns.clear()
self._all_params.clear()
# -- Stage: Generate a new policy for next path sampling
self._cur_params = self._sample_params(itr)
self._all_params.append(self._cur_params.copy())
self.policy.set_param_values(self._cur_params)
logger.log(tabular)
return rtn
|
Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
episodes (garage.EpisodeBatch): Episodes collected using the
current policy.
Returns:
float: The average return of epoch cycle.
|
_train_once
|
python
|
rlworkgroup/garage
|
src/garage/np/algos/cem.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/algos/cem.py
|
MIT
|
def train(self, trainer):
"""Initialize variables and start training.
Args:
trainer (Trainer): Trainer is passed to give algorithm
the access to trainer.step_epochs(), which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
init_mean = self.policy.get_param_values()
self._es = cma.CMAEvolutionStrategy(init_mean, self._sigma0,
{'popsize': self._n_samples})
self._all_params = self._sample_params()
self._cur_params = self._all_params[0]
self.policy.set_param_values(self._cur_params)
self._all_returns = []
# start actual training
last_return = None
for _ in trainer.step_epochs():
for _ in range(self._n_samples):
trainer.step_episode = trainer.obtain_episodes(
trainer.step_itr)
last_return = self._train_once(trainer.step_itr,
trainer.step_episode)
trainer.step_itr += 1
return last_return
|
Initialize variables and start training.
Args:
trainer (Trainer): Trainer is passed to give algorithm
the access to trainer.step_epochs(), which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
|
train
|
python
|
rlworkgroup/garage
|
src/garage/np/algos/cma_es.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/algos/cma_es.py
|
MIT
|
def _train_once(self, itr, episodes):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
episodes (garage.EpisodeBatch): Episodes collected using the
current policy.
Returns:
float: The average return of epoch cycle.
"""
# -- Stage: Run and calculate performance of the algorithm
undiscounted_returns = log_performance(itr,
episodes,
discount=self._discount)
self._episode_reward_mean.extend(undiscounted_returns)
tabular.record('Extras/EpisodeRewardMean',
np.mean(self._episode_reward_mean))
average_return = np.mean(undiscounted_returns)
epoch = itr // self._n_samples
i_sample = itr - epoch * self._n_samples
tabular.record('Epoch', epoch)
tabular.record('# Sample', i_sample)
rtn = average_return
self._all_returns.append(average_return)
if (itr + 1) % self._n_samples == 0:
avg_rtns = np.array(self._all_returns)
self._es.tell(self._all_params, -avg_rtns)
self.policy.set_param_values(self._es.best.get()[0])
# Clear for next epoch
rtn = max(self._all_returns)
self._all_returns.clear()
self._all_params = self._sample_params()
self._cur_params = self._all_params[(i_sample + 1) % self._n_samples]
self.policy.set_param_values(self._cur_params)
logger.log(tabular)
return rtn
|
Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
episodes (garage.EpisodeBatch): Episodes collected using the
current policy.
Returns:
float: The average return of epoch cycle.
|
_train_once
|
python
|
rlworkgroup/garage
|
src/garage/np/algos/cma_es.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/algos/cma_es.py
|
MIT
|
def get_exploration_policy(self):
"""Return a policy used before adaptation to a specific task.
Each time it is retrieved, this policy should only be evaluated in one
task.
Returns:
Policy: The policy used to obtain samples, which are later used for
meta-RL adaptation.
"""
|
Return a policy used before adaptation to a specific task.
Each time it is retrieved, this policy should only be evaluated in one
task.
Returns:
Policy: The policy used to obtain samples, which are later used for
meta-RL adaptation.
|
get_exploration_policy
|
python
|
rlworkgroup/garage
|
src/garage/np/algos/meta_rl_algorithm.py
|
https://github.com/rlworkgroup/garage/blob/master/src/garage/np/algos/meta_rl_algorithm.py
|
MIT
|