Dataset columns (type and value/length range):
  repo             stringlengths   2 – 99
  file             stringlengths   13 – 225
  code             stringlengths   0 – 18.3M
  file_length      int64           0 – 18.3M
  avg_line_length  float64         0 – 1.36M
  max_line_length  int64           0 – 4.26M
  extension_type   stringclasses   1 value
baselines
baselines-master/baselines/deepq/experiments/train_pong.py
from baselines import deepq
from baselines import bench
from baselines import logger
from baselines.common.atari_wrappers import make_atari


def main():
    logger.configure()
    env = make_atari('PongNoFrameskip-v4')
    env = bench.Monitor(env, logger.get_dir())
    env = deepq.wrap_atari_dqn(env)
    model = deepq.learn(
        env,
        "conv_only",
        convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
        hiddens=[256],
        dueling=True,
        lr=1e-4,
        total_timesteps=int(1e7),
        buffer_size=10000,
        exploration_fraction=0.1,
        exploration_final_eps=0.01,
        train_freq=4,
        learning_starts=10000,
        target_network_update_freq=1000,
        gamma=0.99,
    )
    model.save('pong_model.pkl')
    env.close()


if __name__ == '__main__':
    main()
817
22.371429
54
py
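A hedged companion sketch for the training script above: loading the saved 'pong_model.pkl' for evaluation, mirroring the enjoy_cartpole.py pattern further down. Passing total_timesteps=0 builds the network and loads weights without training; the file name comes from train_pong.py, everything else here is illustrative.

from baselines import deepq
from baselines.common.atari_wrappers import make_atari

env = deepq.wrap_atari_dqn(make_atari('PongNoFrameskip-v4'))
act = deepq.learn(
    env,
    "conv_only",
    convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
    hiddens=[256],
    dueling=True,
    total_timesteps=0,            # build the model only, no training
    load_path='pong_model.pkl',   # weights saved by train_pong.py above
)
obs, done = env.reset(), False
while not done:
    env.render()
    obs, rew, done, _ = env.step(act(obs[None])[0])
env.close()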
baselines
baselines-master/baselines/deepq/experiments/enjoy_cartpole.py
import gym

from baselines import deepq


def main():
    env = gym.make("CartPole-v0")
    act = deepq.learn(env, network='mlp', total_timesteps=0, load_path="cartpole_model.pkl")

    while True:
        obs, done = env.reset(), False
        episode_rew = 0
        while not done:
            env.render()
            obs, rew, done, _ = env.step(act(obs[None])[0])
            episode_rew += rew
        print("Episode reward", episode_rew)


if __name__ == '__main__':
    main()
486
21.136364
92
py
baselines
baselines-master/baselines/deepq/experiments/__init__.py
0
0
0
py
baselines
baselines-master/baselines/deepq/experiments/custom_cartpole.py
import gym
import itertools
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers

import baselines.common.tf_util as U

from baselines import logger
from baselines import deepq
from baselines.deepq.replay_buffer import ReplayBuffer
from baselines.deepq.utils import ObservationInput
from baselines.common.schedules import LinearSchedule


def model(inpt, num_actions, scope, reuse=False):
    """This model takes as input an observation and returns values of all actions."""
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        out = layers.fully_connected(out, num_outputs=64, activation_fn=tf.nn.tanh)
        out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
        return out


if __name__ == '__main__':
    with U.make_session(num_cpu=8):
        # Create the environment
        env = gym.make("CartPole-v0")
        # Create all the functions necessary to train the model
        act, train, update_target, debug = deepq.build_train(
            make_obs_ph=lambda name: ObservationInput(env.observation_space, name=name),
            q_func=model,
            num_actions=env.action_space.n,
            optimizer=tf.train.AdamOptimizer(learning_rate=5e-4),
        )
        # Create the replay buffer
        replay_buffer = ReplayBuffer(50000)
        # Create the schedule for exploration starting from 1 (every action is random) down to
        # 0.02 (98% of actions are selected according to values predicted by the model).
        exploration = LinearSchedule(schedule_timesteps=10000, initial_p=1.0, final_p=0.02)

        # Initialize the parameters and copy them to the target network.
        U.initialize()
        update_target()

        episode_rewards = [0.0]
        obs = env.reset()
        for t in itertools.count():
            # Take action and update exploration to the newest value
            action = act(obs[None], update_eps=exploration.value(t))[0]
            new_obs, rew, done, _ = env.step(action)
            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0)

            is_solved = t > 100 and np.mean(episode_rewards[-101:-1]) >= 200
            if is_solved:
                # Show off the result
                env.render()
            else:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if t > 1000:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(32)
                    train(obses_t, actions, rewards, obses_tp1, dones, np.ones_like(rewards))
                # Update target network periodically.
                if t % 1000 == 0:
                    update_target()

            if done and len(episode_rewards) % 10 == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", len(episode_rewards))
                logger.record_tabular("mean episode reward", round(np.mean(episode_rewards[-101:-1]), 1))
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()
3,358
40.9875
105
py
baselines
baselines-master/baselines/trpo_mpi/defaults.py
from baselines.common.models import mlp, cnn_small


def atari():
    return dict(
        network=cnn_small(),
        timesteps_per_batch=512,
        max_kl=0.001,
        cg_iters=10,
        cg_damping=1e-3,
        gamma=0.98,
        lam=1.0,
        vf_iters=3,
        vf_stepsize=1e-4,
        entcoeff=0.00,
    )


def mujoco():
    return dict(
        network=mlp(num_hidden=32, num_layers=2),
        timesteps_per_batch=1024,
        max_kl=0.01,
        cg_iters=10,
        cg_damping=0.1,
        gamma=0.99,
        lam=0.98,
        vf_iters=5,
        vf_stepsize=1e-3,
        normalize_observations=True,
    )
638
19.612903
51
py
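The two functions above return plain dicts of hyperparameters. A small illustrative look at how they might be consumed; the splat into trpo_mpi.learn shown in the comment is roughly how baselines.run wires them together, and is only sketched here.

from baselines.trpo_mpi import defaults

kwargs = defaults.mujoco()                               # dict of TRPO hyperparameters
print(kwargs['max_kl'], kwargs['timesteps_per_batch'])   # 0.01 1024
# baselines.run roughly ends up calling:
#   trpo_mpi.learn(env=env, total_timesteps=total_timesteps, **kwargs)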
baselines
baselines-master/baselines/trpo_mpi/__init__.py
0
0
0
py
baselines
baselines-master/baselines/trpo_mpi/trpo_mpi.py
from baselines.common import explained_variance, zipsame, dataset from baselines import logger import baselines.common.tf_util as U import tensorflow as tf, numpy as np import time from baselines.common import colorize from collections import deque from baselines.common import set_global_seeds from baselines.common.mpi_adam import MpiAdam from baselines.common.cg import cg from baselines.common.input import observation_placeholder from baselines.common.policies import build_policy from contextlib import contextmanager try: from mpi4py import MPI except ImportError: MPI = None def traj_segment_generator(pi, env, horizon, stochastic): # Initialize state variables t = 0 ac = env.action_space.sample() new = True rew = 0.0 ob = env.reset() cur_ep_ret = 0 cur_ep_len = 0 ep_rets = [] ep_lens = [] # Initialize history arrays obs = np.array([ob for _ in range(horizon)]) rews = np.zeros(horizon, 'float32') vpreds = np.zeros(horizon, 'float32') news = np.zeros(horizon, 'int32') acs = np.array([ac for _ in range(horizon)]) prevacs = acs.copy() while True: prevac = ac ac, vpred, _, _ = pi.step(ob, stochastic=stochastic) # Slight weirdness here because we need value function at time T # before returning segment [0, T-1] so we get the correct # terminal value if t > 0 and t % horizon == 0: yield {"ob" : obs, "rew" : rews, "vpred" : vpreds, "new" : news, "ac" : acs, "prevac" : prevacs, "nextvpred": vpred * (1 - new), "ep_rets" : ep_rets, "ep_lens" : ep_lens} _, vpred, _, _ = pi.step(ob, stochastic=stochastic) # Be careful!!! if you change the downstream algorithm to aggregate # several of these batches, then be sure to do a deepcopy ep_rets = [] ep_lens = [] i = t % horizon obs[i] = ob vpreds[i] = vpred news[i] = new acs[i] = ac prevacs[i] = prevac ob, rew, new, _ = env.step(ac) rews[i] = rew cur_ep_ret += rew cur_ep_len += 1 if new: ep_rets.append(cur_ep_ret) ep_lens.append(cur_ep_len) cur_ep_ret = 0 cur_ep_len = 0 ob = env.reset() t += 1 def add_vtarg_and_adv(seg, gamma, lam): new = np.append(seg["new"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1 vpred = np.append(seg["vpred"], seg["nextvpred"]) T = len(seg["rew"]) seg["adv"] = gaelam = np.empty(T, 'float32') rew = seg["rew"] lastgaelam = 0 for t in reversed(range(T)): nonterminal = 1-new[t+1] delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t] gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam seg["tdlamret"] = seg["adv"] + seg["vpred"] def learn(*, network, env, total_timesteps, timesteps_per_batch=1024, # what to train on max_kl=0.001, cg_iters=10, gamma=0.99, lam=1.0, # advantage estimation seed=None, ent_coef=0.0, cg_damping=1e-2, vf_stepsize=3e-4, vf_iters =3, max_episodes=0, max_iters=0, # time constraint callback=None, load_path=None, **network_kwargs ): ''' learn a policy function with TRPO algorithm Parameters: ---------- network neural network to learn. 
Can be either string ('mlp', 'cnn', 'lstm', 'lnlstm' for basic types) or function that takes input placeholder and returns tuple (output, None) for feedforward nets or (output, (state_placeholder, state_output, mask_placeholder)) for recurrent nets env environment (one of the gym environments or wrapped via baselines.common.vec_env.VecEnv-type class timesteps_per_batch timesteps per gradient estimation batch max_kl max KL divergence between old policy and new policy ( KL(pi_old || pi) ) ent_coef coefficient of policy entropy term in the optimization objective cg_iters number of iterations of conjugate gradient algorithm cg_damping conjugate gradient damping vf_stepsize learning rate for adam optimizer used to optimie value function loss vf_iters number of iterations of value function optimization iterations per each policy optimization step total_timesteps max number of timesteps max_episodes max number of episodes max_iters maximum number of policy optimization iterations callback function to be called with (locals(), globals()) each policy optimization step load_path str, path to load the model from (default: None, i.e. no model is loaded) **network_kwargs keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network Returns: ------- learnt model ''' if MPI is not None: nworkers = MPI.COMM_WORLD.Get_size() rank = MPI.COMM_WORLD.Get_rank() else: nworkers = 1 rank = 0 cpus_per_worker = 1 U.get_session(config=tf.ConfigProto( allow_soft_placement=True, inter_op_parallelism_threads=cpus_per_worker, intra_op_parallelism_threads=cpus_per_worker )) policy = build_policy(env, network, value_network='copy', **network_kwargs) set_global_seeds(seed) np.set_printoptions(precision=3) # Setup losses and stuff # ---------------------------------------- ob_space = env.observation_space ac_space = env.action_space ob = observation_placeholder(ob_space) with tf.variable_scope("pi"): pi = policy(observ_placeholder=ob) with tf.variable_scope("oldpi"): oldpi = policy(observ_placeholder=ob) atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable) ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return ac = pi.pdtype.sample_placeholder([None]) kloldnew = oldpi.pd.kl(pi.pd) ent = pi.pd.entropy() meankl = tf.reduce_mean(kloldnew) meanent = tf.reduce_mean(ent) entbonus = ent_coef * meanent vferr = tf.reduce_mean(tf.square(pi.vf - ret)) ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # advantage * pnew / pold surrgain = tf.reduce_mean(ratio * atarg) optimgain = surrgain + entbonus losses = [optimgain, meankl, entbonus, surrgain, meanent] loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"] dist = meankl all_var_list = get_trainable_variables("pi") # var_list = [v for v in all_var_list if v.name.split("/")[1].startswith("pol")] # vf_var_list = [v for v in all_var_list if v.name.split("/")[1].startswith("vf")] var_list = get_pi_trainable_variables("pi") vf_var_list = get_vf_trainable_variables("pi") vfadam = MpiAdam(vf_var_list) get_flat = U.GetFlat(var_list) set_from_flat = U.SetFromFlat(var_list) klgrads = tf.gradients(dist, var_list) flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name="flat_tan") shapes = [var.get_shape().as_list() for var in var_list] start = 0 tangents = [] for shape in shapes: sz = U.intprod(shape) tangents.append(tf.reshape(flat_tangent[start:start+sz], shape)) start += sz gvp = tf.add_n([tf.reduce_sum(g*tangent) for (g, 
tangent) in zipsame(klgrads, tangents)]) #pylint: disable=E1111 fvp = U.flatgrad(gvp, var_list) assign_old_eq_new = U.function([],[], updates=[tf.assign(oldv, newv) for (oldv, newv) in zipsame(get_variables("oldpi"), get_variables("pi"))]) compute_losses = U.function([ob, ac, atarg], losses) compute_lossandgrad = U.function([ob, ac, atarg], losses + [U.flatgrad(optimgain, var_list)]) compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp) compute_vflossandgrad = U.function([ob, ret], U.flatgrad(vferr, vf_var_list)) @contextmanager def timed(msg): if rank == 0: print(colorize(msg, color='magenta')) tstart = time.time() yield print(colorize("done in %.3f seconds"%(time.time() - tstart), color='magenta')) else: yield def allmean(x): assert isinstance(x, np.ndarray) if MPI is not None: out = np.empty_like(x) MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM) out /= nworkers else: out = np.copy(x) return out U.initialize() if load_path is not None: pi.load(load_path) th_init = get_flat() if MPI is not None: MPI.COMM_WORLD.Bcast(th_init, root=0) set_from_flat(th_init) vfadam.sync() print("Init param sum", th_init.sum(), flush=True) # Prepare for rollouts # ---------------------------------------- seg_gen = traj_segment_generator(pi, env, timesteps_per_batch, stochastic=True) episodes_so_far = 0 timesteps_so_far = 0 iters_so_far = 0 tstart = time.time() lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards if sum([max_iters>0, total_timesteps>0, max_episodes>0])==0: # noththing to be done return pi assert sum([max_iters>0, total_timesteps>0, max_episodes>0]) < 2, \ 'out of max_iters, total_timesteps, and max_episodes only one should be specified' while True: if callback: callback(locals(), globals()) if total_timesteps and timesteps_so_far >= total_timesteps: break elif max_episodes and episodes_so_far >= max_episodes: break elif max_iters and iters_so_far >= max_iters: break logger.log("********** Iteration %i ************"%iters_so_far) with timed("sampling"): seg = seg_gen.__next__() add_vtarg_and_adv(seg, gamma, lam) # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets)) ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"] vpredbefore = seg["vpred"] # predicted value function before udpate atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate if hasattr(pi, "ret_rms"): pi.ret_rms.update(tdlamret) if hasattr(pi, "ob_rms"): pi.ob_rms.update(ob) # update running mean/std for policy args = seg["ob"], seg["ac"], atarg fvpargs = [arr[::5] for arr in args] def fisher_vector_product(p): return allmean(compute_fvp(p, *fvpargs)) + cg_damping * p assign_old_eq_new() # set old parameter values to new parameter values with timed("computegrad"): *lossbefore, g = compute_lossandgrad(*args) lossbefore = allmean(np.array(lossbefore)) g = allmean(g) if np.allclose(g, 0): logger.log("Got zero gradient. 
not updating") else: with timed("cg"): stepdir = cg(fisher_vector_product, g, cg_iters=cg_iters, verbose=rank==0) assert np.isfinite(stepdir).all() shs = .5*stepdir.dot(fisher_vector_product(stepdir)) lm = np.sqrt(shs / max_kl) # logger.log("lagrange multiplier:", lm, "gnorm:", np.linalg.norm(g)) fullstep = stepdir / lm expectedimprove = g.dot(fullstep) surrbefore = lossbefore[0] stepsize = 1.0 thbefore = get_flat() for _ in range(10): thnew = thbefore + fullstep * stepsize set_from_flat(thnew) meanlosses = surr, kl, *_ = allmean(np.array(compute_losses(*args))) improve = surr - surrbefore logger.log("Expected: %.3f Actual: %.3f"%(expectedimprove, improve)) if not np.isfinite(meanlosses).all(): logger.log("Got non-finite value of losses -- bad!") elif kl > max_kl * 1.5: logger.log("violated KL constraint. shrinking step.") elif improve < 0: logger.log("surrogate didn't improve. shrinking step.") else: logger.log("Stepsize OK!") break stepsize *= .5 else: logger.log("couldn't compute a good step") set_from_flat(thbefore) if nworkers > 1 and iters_so_far % 20 == 0: paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), vfadam.getflat().sum())) # list of tuples assert all(np.allclose(ps, paramsums[0]) for ps in paramsums[1:]) for (lossname, lossval) in zip(loss_names, meanlosses): logger.record_tabular(lossname, lossval) with timed("vf"): for _ in range(vf_iters): for (mbob, mbret) in dataset.iterbatches((seg["ob"], seg["tdlamret"]), include_final_partial_batch=False, batch_size=64): g = allmean(compute_vflossandgrad(mbob, mbret)) vfadam.update(g, vf_stepsize) logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret)) lrlocal = (seg["ep_lens"], seg["ep_rets"]) # local values if MPI is not None: listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples else: listoflrpairs = [lrlocal] lens, rews = map(flatten_lists, zip(*listoflrpairs)) lenbuffer.extend(lens) rewbuffer.extend(rews) logger.record_tabular("EpLenMean", np.mean(lenbuffer)) logger.record_tabular("EpRewMean", np.mean(rewbuffer)) logger.record_tabular("EpThisIter", len(lens)) episodes_so_far += len(lens) timesteps_so_far += sum(lens) iters_so_far += 1 logger.record_tabular("EpisodesSoFar", episodes_so_far) logger.record_tabular("TimestepsSoFar", timesteps_so_far) logger.record_tabular("TimeElapsed", time.time() - tstart) if rank==0: logger.dump_tabular() return pi def flatten_lists(listoflists): return [el for list_ in listoflists for el in list_] def get_variables(scope): return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope) def get_trainable_variables(scope): return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope) def get_vf_trainable_variables(scope): return [v for v in get_trainable_variables(scope) if 'vf' in v.name[len(scope):].split('/')] def get_pi_trainable_variables(scope): return [v for v in get_trainable_variables(scope) if 'pi' in v.name[len(scope):].split('/')]
15,098
35.91687
170
py
baselines
baselines-master/baselines/common/mpi_adam.py
import baselines.common.tf_util as U import tensorflow as tf import numpy as np try: from mpi4py import MPI except ImportError: MPI = None class MpiAdam(object): def __init__(self, var_list, *, beta1=0.9, beta2=0.999, epsilon=1e-08, scale_grad_by_procs=True, comm=None): self.var_list = var_list self.beta1 = beta1 self.beta2 = beta2 self.epsilon = epsilon self.scale_grad_by_procs = scale_grad_by_procs size = sum(U.numel(v) for v in var_list) self.m = np.zeros(size, 'float32') self.v = np.zeros(size, 'float32') self.t = 0 self.setfromflat = U.SetFromFlat(var_list) self.getflat = U.GetFlat(var_list) self.comm = MPI.COMM_WORLD if comm is None and MPI is not None else comm def update(self, localg, stepsize): if self.t % 100 == 0: self.check_synced() localg = localg.astype('float32') if self.comm is not None: globalg = np.zeros_like(localg) self.comm.Allreduce(localg, globalg, op=MPI.SUM) if self.scale_grad_by_procs: globalg /= self.comm.Get_size() else: globalg = np.copy(localg) self.t += 1 a = stepsize * np.sqrt(1 - self.beta2**self.t)/(1 - self.beta1**self.t) self.m = self.beta1 * self.m + (1 - self.beta1) * globalg self.v = self.beta2 * self.v + (1 - self.beta2) * (globalg * globalg) step = (- a) * self.m / (np.sqrt(self.v) + self.epsilon) self.setfromflat(self.getflat() + step) def sync(self): if self.comm is None: return theta = self.getflat() self.comm.Bcast(theta, root=0) self.setfromflat(theta) def check_synced(self): if self.comm is None: return if self.comm.Get_rank() == 0: # this is root theta = self.getflat() self.comm.Bcast(theta, root=0) else: thetalocal = self.getflat() thetaroot = np.empty_like(thetalocal) self.comm.Bcast(thetaroot, root=0) assert (thetaroot == thetalocal).all(), (thetaroot, thetalocal) @U.in_session def test_MpiAdam(): np.random.seed(0) tf.set_random_seed(0) a = tf.Variable(np.random.randn(3).astype('float32')) b = tf.Variable(np.random.randn(2,5).astype('float32')) loss = tf.reduce_sum(tf.square(a)) + tf.reduce_sum(tf.sin(b)) stepsize = 1e-2 update_op = tf.train.AdamOptimizer(stepsize).minimize(loss) do_update = U.function([], loss, updates=[update_op]) tf.get_default_session().run(tf.global_variables_initializer()) losslist_ref = [] for i in range(10): l = do_update() print(i, l) losslist_ref.append(l) tf.set_random_seed(0) tf.get_default_session().run(tf.global_variables_initializer()) var_list = [a,b] lossandgrad = U.function([], [loss, U.flatgrad(loss, var_list)]) adam = MpiAdam(var_list) losslist_test = [] for i in range(10): l,g = lossandgrad() adam.update(g, stepsize) print(i,l) losslist_test.append(l) np.testing.assert_allclose(np.array(losslist_ref), np.array(losslist_test), atol=1e-4) if __name__ == '__main__': test_MpiAdam()
3,296
30.701923
112
py
baselines
baselines-master/baselines/common/cg.py
import numpy as np


def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10):
    """
    Demmel p 312
    """
    p = b.copy()
    r = b.copy()
    x = np.zeros_like(b)
    rdotr = r.dot(r)

    fmtstr = "%10i %10.3g %10.3g"
    titlestr = "%10s %10s %10s"
    if verbose: print(titlestr % ("iter", "residual norm", "soln norm"))

    for i in range(cg_iters):
        if callback is not None:
            callback(x)
        if verbose: print(fmtstr % (i, rdotr, np.linalg.norm(x)))
        z = f_Ax(p)
        v = rdotr / p.dot(z)
        x += v * p
        r -= v * z
        newrdotr = r.dot(r)
        mu = newrdotr / rdotr
        p = r + mu * p

        rdotr = newrdotr
        if rdotr < residual_tol:
            break

    if callback is not None:
        callback(x)
    if verbose: print(fmtstr % (i + 1, rdotr, np.linalg.norm(x)))  # pylint: disable=W0631
    return x
897
24.657143
88
py
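A small self-contained sanity check of the conjugate-gradient helper above: solve A x = b for a symmetric positive-definite matrix supplied as a matrix-vector product closure. The matrix size and tolerance are arbitrary illustrative choices.

import numpy as np
from baselines.common.cg import cg

rng = np.random.RandomState(0)
M = rng.randn(5, 5)
A = M @ M.T + 5 * np.eye(5)          # symmetric positive definite by construction
b = rng.randn(5)

x = cg(lambda p: A.dot(p), b, cg_iters=20, verbose=True)
assert np.allclose(A.dot(x), b, atol=1e-4)   # CG recovers the solution of A x = b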
baselines
baselines-master/baselines/common/runners.py
import numpy as np
from abc import ABC, abstractmethod


class AbstractEnvRunner(ABC):
    def __init__(self, *, env, model, nsteps):
        self.env = env
        self.model = model
        self.nenv = nenv = env.num_envs if hasattr(env, 'num_envs') else 1
        self.batch_ob_shape = (nenv * nsteps,) + env.observation_space.shape
        self.obs = np.zeros((nenv,) + env.observation_space.shape, dtype=env.observation_space.dtype.name)
        self.obs[:] = env.reset()
        self.nsteps = nsteps
        self.states = model.initial_state
        self.dones = [False for _ in range(nenv)]

    @abstractmethod
    def run(self):
        raise NotImplementedError
670
32.55
106
py
baselines
baselines-master/baselines/common/distributions.py
import tensorflow as tf import numpy as np import baselines.common.tf_util as U from baselines.a2c.utils import fc from tensorflow.python.ops import math_ops class Pd(object): """ A particular probability distribution """ def flatparam(self): raise NotImplementedError def mode(self): raise NotImplementedError def neglogp(self, x): # Usually it's easier to define the negative logprob raise NotImplementedError def kl(self, other): raise NotImplementedError def entropy(self): raise NotImplementedError def sample(self): raise NotImplementedError def logp(self, x): return - self.neglogp(x) def get_shape(self): return self.flatparam().shape @property def shape(self): return self.get_shape() def __getitem__(self, idx): return self.__class__(self.flatparam()[idx]) class PdType(object): """ Parametrized family of probability distributions """ def pdclass(self): raise NotImplementedError def pdfromflat(self, flat): return self.pdclass()(flat) def pdfromlatent(self, latent_vector, init_scale, init_bias): raise NotImplementedError def param_shape(self): raise NotImplementedError def sample_shape(self): raise NotImplementedError def sample_dtype(self): raise NotImplementedError def param_placeholder(self, prepend_shape, name=None): return tf.placeholder(dtype=tf.float32, shape=prepend_shape+self.param_shape(), name=name) def sample_placeholder(self, prepend_shape, name=None): return tf.placeholder(dtype=self.sample_dtype(), shape=prepend_shape+self.sample_shape(), name=name) def __eq__(self, other): return (type(self) == type(other)) and (self.__dict__ == other.__dict__) class CategoricalPdType(PdType): def __init__(self, ncat): self.ncat = ncat def pdclass(self): return CategoricalPd def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0): pdparam = _matching_fc(latent_vector, 'pi', self.ncat, init_scale=init_scale, init_bias=init_bias) return self.pdfromflat(pdparam), pdparam def param_shape(self): return [self.ncat] def sample_shape(self): return [] def sample_dtype(self): return tf.int32 class MultiCategoricalPdType(PdType): def __init__(self, nvec): self.ncats = nvec.astype('int32') assert (self.ncats > 0).all() def pdclass(self): return MultiCategoricalPd def pdfromflat(self, flat): return MultiCategoricalPd(self.ncats, flat) def pdfromlatent(self, latent, init_scale=1.0, init_bias=0.0): pdparam = _matching_fc(latent, 'pi', self.ncats.sum(), init_scale=init_scale, init_bias=init_bias) return self.pdfromflat(pdparam), pdparam def param_shape(self): return [sum(self.ncats)] def sample_shape(self): return [len(self.ncats)] def sample_dtype(self): return tf.int32 class DiagGaussianPdType(PdType): def __init__(self, size): self.size = size def pdclass(self): return DiagGaussianPd def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0): mean = _matching_fc(latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias) logstd = tf.get_variable(name='pi/logstd', shape=[1, self.size], initializer=tf.zeros_initializer()) pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1) return self.pdfromflat(pdparam), mean def param_shape(self): return [2*self.size] def sample_shape(self): return [self.size] def sample_dtype(self): return tf.float32 class BernoulliPdType(PdType): def __init__(self, size): self.size = size def pdclass(self): return BernoulliPd def param_shape(self): return [self.size] def sample_shape(self): return [self.size] def sample_dtype(self): return tf.int32 def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0): pdparam = 
_matching_fc(latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias) return self.pdfromflat(pdparam), pdparam # WRONG SECOND DERIVATIVES # class CategoricalPd(Pd): # def __init__(self, logits): # self.logits = logits # self.ps = tf.nn.softmax(logits) # @classmethod # def fromflat(cls, flat): # return cls(flat) # def flatparam(self): # return self.logits # def mode(self): # return U.argmax(self.logits, axis=-1) # def logp(self, x): # return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x) # def kl(self, other): # return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \ # - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps) # def entropy(self): # return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps) # def sample(self): # u = tf.random_uniform(tf.shape(self.logits)) # return U.argmax(self.logits - tf.log(-tf.log(u)), axis=-1) class CategoricalPd(Pd): def __init__(self, logits): self.logits = logits def flatparam(self): return self.logits def mode(self): return tf.argmax(self.logits, axis=-1) @property def mean(self): return tf.nn.softmax(self.logits) def neglogp(self, x): # return tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=x) # Note: we can't use sparse_softmax_cross_entropy_with_logits because # the implementation does not allow second-order derivatives... if x.dtype in {tf.uint8, tf.int32, tf.int64}: # one-hot encoding x_shape_list = x.shape.as_list() logits_shape_list = self.logits.get_shape().as_list()[:-1] for xs, ls in zip(x_shape_list, logits_shape_list): if xs is not None and ls is not None: assert xs == ls, 'shape mismatch: {} in x vs {} in logits'.format(xs, ls) x = tf.one_hot(x, self.logits.get_shape().as_list()[-1]) else: # already encoded assert x.shape.as_list() == self.logits.shape.as_list() return tf.nn.softmax_cross_entropy_with_logits_v2( logits=self.logits, labels=x) def kl(self, other): a0 = self.logits - tf.reduce_max(self.logits, axis=-1, keepdims=True) a1 = other.logits - tf.reduce_max(other.logits, axis=-1, keepdims=True) ea0 = tf.exp(a0) ea1 = tf.exp(a1) z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True) z1 = tf.reduce_sum(ea1, axis=-1, keepdims=True) p0 = ea0 / z0 return tf.reduce_sum(p0 * (a0 - tf.log(z0) - a1 + tf.log(z1)), axis=-1) def entropy(self): a0 = self.logits - tf.reduce_max(self.logits, axis=-1, keepdims=True) ea0 = tf.exp(a0) z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True) p0 = ea0 / z0 return tf.reduce_sum(p0 * (tf.log(z0) - a0), axis=-1) def sample(self): u = tf.random_uniform(tf.shape(self.logits), dtype=self.logits.dtype) return tf.argmax(self.logits - tf.log(-tf.log(u)), axis=-1) @classmethod def fromflat(cls, flat): return cls(flat) class MultiCategoricalPd(Pd): def __init__(self, nvec, flat): self.flat = flat self.categoricals = list(map(CategoricalPd, tf.split(flat, np.array(nvec, dtype=np.int32), axis=-1))) def flatparam(self): return self.flat def mode(self): return tf.cast(tf.stack([p.mode() for p in self.categoricals], axis=-1), tf.int32) def neglogp(self, x): return tf.add_n([p.neglogp(px) for p, px in zip(self.categoricals, tf.unstack(x, axis=-1))]) def kl(self, other): return tf.add_n([p.kl(q) for p, q in zip(self.categoricals, other.categoricals)]) def entropy(self): return tf.add_n([p.entropy() for p in self.categoricals]) def sample(self): return tf.cast(tf.stack([p.sample() for p in self.categoricals], axis=-1), tf.int32) @classmethod def fromflat(cls, flat): raise NotImplementedError class DiagGaussianPd(Pd): def __init__(self, 
flat): self.flat = flat mean, logstd = tf.split(axis=len(flat.shape)-1, num_or_size_splits=2, value=flat) self.mean = mean self.logstd = logstd self.std = tf.exp(logstd) def flatparam(self): return self.flat def mode(self): return self.mean def neglogp(self, x): return 0.5 * tf.reduce_sum(tf.square((x - self.mean) / self.std), axis=-1) \ + 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \ + tf.reduce_sum(self.logstd, axis=-1) def kl(self, other): assert isinstance(other, DiagGaussianPd) return tf.reduce_sum(other.logstd - self.logstd + (tf.square(self.std) + tf.square(self.mean - other.mean)) / (2.0 * tf.square(other.std)) - 0.5, axis=-1) def entropy(self): return tf.reduce_sum(self.logstd + .5 * np.log(2.0 * np.pi * np.e), axis=-1) def sample(self): return self.mean + self.std * tf.random_normal(tf.shape(self.mean)) @classmethod def fromflat(cls, flat): return cls(flat) class BernoulliPd(Pd): def __init__(self, logits): self.logits = logits self.ps = tf.sigmoid(logits) def flatparam(self): return self.logits @property def mean(self): return self.ps def mode(self): return tf.round(self.ps) def neglogp(self, x): return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=tf.to_float(x)), axis=-1) def kl(self, other): return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=other.logits, labels=self.ps), axis=-1) - tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.ps), axis=-1) def entropy(self): return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.ps), axis=-1) def sample(self): u = tf.random_uniform(tf.shape(self.ps)) return tf.to_float(math_ops.less(u, self.ps)) @classmethod def fromflat(cls, flat): return cls(flat) def make_pdtype(ac_space): from gym import spaces if isinstance(ac_space, spaces.Box): assert len(ac_space.shape) == 1 return DiagGaussianPdType(ac_space.shape[0]) elif isinstance(ac_space, spaces.Discrete): return CategoricalPdType(ac_space.n) elif isinstance(ac_space, spaces.MultiDiscrete): return MultiCategoricalPdType(ac_space.nvec) elif isinstance(ac_space, spaces.MultiBinary): return BernoulliPdType(ac_space.n) else: raise NotImplementedError def shape_el(v, i): maybe = v.get_shape()[i] if maybe is not None: return maybe else: return tf.shape(v)[i] @U.in_session def test_probtypes(): np.random.seed(0) pdparam_diag_gauss = np.array([-.2, .3, .4, -.5, .1, -.5, .1, 0.8]) diag_gauss = DiagGaussianPdType(pdparam_diag_gauss.size // 2) #pylint: disable=E1101 validate_probtype(diag_gauss, pdparam_diag_gauss) pdparam_categorical = np.array([-.2, .3, .5]) categorical = CategoricalPdType(pdparam_categorical.size) #pylint: disable=E1101 validate_probtype(categorical, pdparam_categorical) nvec = [1,2,3] pdparam_multicategorical = np.array([-.2, .3, .5, .1, 1, -.1]) multicategorical = MultiCategoricalPdType(nvec) #pylint: disable=E1101 validate_probtype(multicategorical, pdparam_multicategorical) pdparam_bernoulli = np.array([-.2, .3, .5]) bernoulli = BernoulliPdType(pdparam_bernoulli.size) #pylint: disable=E1101 validate_probtype(bernoulli, pdparam_bernoulli) def validate_probtype(probtype, pdparam): N = 100000 # Check to see if mean negative log likelihood == differential entropy Mval = np.repeat(pdparam[None, :], N, axis=0) M = probtype.param_placeholder([N]) X = probtype.sample_placeholder([N]) pd = probtype.pdfromflat(M) calcloglik = U.function([X, M], pd.logp(X)) calcent = U.function([M], pd.entropy()) Xval = tf.get_default_session().run(pd.sample(), 
feed_dict={M:Mval}) logliks = calcloglik(Xval, Mval) entval_ll = - logliks.mean() #pylint: disable=E1101 entval_ll_stderr = logliks.std() / np.sqrt(N) #pylint: disable=E1101 entval = calcent(Mval).mean() #pylint: disable=E1101 assert np.abs(entval - entval_ll) < 3 * entval_ll_stderr # within 3 sigmas # Check to see if kldiv[p,q] = - ent[p] - E_p[log q] M2 = probtype.param_placeholder([N]) pd2 = probtype.pdfromflat(M2) q = pdparam + np.random.randn(pdparam.size) * 0.1 Mval2 = np.repeat(q[None, :], N, axis=0) calckl = U.function([M, M2], pd.kl(pd2)) klval = calckl(Mval, Mval2).mean() #pylint: disable=E1101 logliks = calcloglik(Xval, Mval2) klval_ll = - entval - logliks.mean() #pylint: disable=E1101 klval_ll_stderr = logliks.std() / np.sqrt(N) #pylint: disable=E1101 assert np.abs(klval - klval_ll) < 3 * klval_ll_stderr # within 3 sigmas print('ok on', probtype, pdparam) def _matching_fc(tensor, name, size, init_scale, init_bias): if tensor.shape[-1] == size: return tensor else: return fc(tensor, name, size, init_scale=init_scale, init_bias=init_bias)
13,595
37.191011
217
py
baselines
baselines-master/baselines/common/mpi_util.py
from collections import defaultdict import os, numpy as np import platform import shutil import subprocess import warnings import sys try: from mpi4py import MPI except ImportError: MPI = None def sync_from_root(sess, variables, comm=None): """ Send the root node's parameters to every worker. Arguments: sess: the TensorFlow session. variables: all parameter variables including optimizer's """ if comm is None: comm = MPI.COMM_WORLD import tensorflow as tf values = comm.bcast(sess.run(variables)) sess.run([tf.assign(var, val) for (var, val) in zip(variables, values)]) def gpu_count(): """ Count the GPUs on this machine. """ if shutil.which('nvidia-smi') is None: return 0 output = subprocess.check_output(['nvidia-smi', '--query-gpu=gpu_name', '--format=csv']) return max(0, len(output.split(b'\n')) - 2) def setup_mpi_gpus(): """ Set CUDA_VISIBLE_DEVICES to MPI rank if not already set """ if 'CUDA_VISIBLE_DEVICES' not in os.environ: if sys.platform == 'darwin': # This Assumes if you're on OSX you're just ids = [] # doing a smoke test and don't want GPUs else: lrank, _lsize = get_local_rank_size(MPI.COMM_WORLD) ids = [lrank] os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, ids)) def get_local_rank_size(comm): """ Returns the rank of each process on its machine The processes on a given machine will be assigned ranks 0, 1, 2, ..., N-1, where N is the number of processes on this machine. Useful if you want to assign one gpu per machine """ this_node = platform.node() ranks_nodes = comm.allgather((comm.Get_rank(), this_node)) node2rankssofar = defaultdict(int) local_rank = None for (rank, node) in ranks_nodes: if rank == comm.Get_rank(): local_rank = node2rankssofar[node] node2rankssofar[node] += 1 assert local_rank is not None return local_rank, node2rankssofar[this_node] def share_file(comm, path): """ Copies the file from rank 0 to all other ranks Puts it in the same place on all machines """ localrank, _ = get_local_rank_size(comm) if comm.Get_rank() == 0: with open(path, 'rb') as fh: data = fh.read() comm.bcast(data) else: data = comm.bcast(None) if localrank == 0: os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, 'wb') as fh: fh.write(data) comm.Barrier() def dict_gather(comm, d, op='mean', assert_all_have_data=True): """ Perform a reduction operation over dicts """ if comm is None: return d alldicts = comm.allgather(d) size = comm.size k2li = defaultdict(list) for d in alldicts: for (k,v) in d.items(): k2li[k].append(v) result = {} for (k,li) in k2li.items(): if assert_all_have_data: assert len(li)==size, "only %i out of %i MPI workers have sent '%s'" % (len(li), size, k) if op=='mean': result[k] = np.mean(li, axis=0) elif op=='sum': result[k] = np.sum(li, axis=0) else: assert 0, op return result def mpi_weighted_mean(comm, local_name2valcount): """ Perform a weighted average over dicts that are each on a different node Input: local_name2valcount: dict mapping key -> (value, count) Returns: key -> mean """ all_name2valcount = comm.gather(local_name2valcount) if comm.rank == 0: name2sum = defaultdict(float) name2count = defaultdict(float) for n2vc in all_name2valcount: for (name, (val, count)) in n2vc.items(): try: val = float(val) except ValueError: if comm.rank == 0: warnings.warn('WARNING: tried to compute mean on non-float {}={}'.format(name, val)) else: name2sum[name] += val * count name2count[name] += count return {name : name2sum[name] / name2count[name] for name in name2sum} else: return {}
4,259
30.791045
108
py
baselines
baselines-master/baselines/common/schedules.py
"""This file is used for specifying various schedules that evolve over time throughout the execution of the algorithm, such as: - learning rate for the optimizer - exploration epsilon for the epsilon greedy exploration strategy - beta parameter for beta parameter in prioritized replay Each schedule has a function `value(t)` which returns the current value of the parameter given the timestep t of the optimization procedure. """ class Schedule(object): def value(self, t): """Value of the schedule at time t""" raise NotImplementedError() class ConstantSchedule(object): def __init__(self, value): """Value remains constant over time. Parameters ---------- value: float Constant value of the schedule """ self._v = value def value(self, t): """See Schedule.value""" return self._v def linear_interpolation(l, r, alpha): return l + alpha * (r - l) class PiecewiseSchedule(object): def __init__(self, endpoints, interpolation=linear_interpolation, outside_value=None): """Piecewise schedule. endpoints: [(int, int)] list of pairs `(time, value)` meanining that schedule should output `value` when `t==time`. All the values for time must be sorted in an increasing order. When t is between two times, e.g. `(time_a, value_a)` and `(time_b, value_b)`, such that `time_a <= t < time_b` then value outputs `interpolation(value_a, value_b, alpha)` where alpha is a fraction of time passed between `time_a` and `time_b` for time `t`. interpolation: lambda float, float, float: float a function that takes value to the left and to the right of t according to the `endpoints`. Alpha is the fraction of distance from left endpoint to right endpoint that t has covered. See linear_interpolation for example. outside_value: float if the value is requested outside of all the intervals sepecified in `endpoints` this value is returned. If None then AssertionError is raised when outside value is requested. """ idxes = [e[0] for e in endpoints] assert idxes == sorted(idxes) self._interpolation = interpolation self._outside_value = outside_value self._endpoints = endpoints def value(self, t): """See Schedule.value""" for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]): if l_t <= t and t < r_t: alpha = float(t - l_t) / (r_t - l_t) return self._interpolation(l, r, alpha) # t does not belong to any of the pieces, so doom. assert self._outside_value is not None return self._outside_value class LinearSchedule(object): def __init__(self, schedule_timesteps, final_p, initial_p=1.0): """Linear interpolation between initial_p and final_p over schedule_timesteps. After this many timesteps pass final_p is returned. Parameters ---------- schedule_timesteps: int Number of timesteps for which to linearly anneal initial_p to final_p initial_p: float initial output value final_p: float final output value """ self.schedule_timesteps = schedule_timesteps self.final_p = final_p self.initial_p = initial_p def value(self, t): """See Schedule.value""" fraction = min(float(t) / self.schedule_timesteps, 1.0) return self.initial_p + fraction * (self.final_p - self.initial_p)
3,702
36.03
90
py
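A quick illustrative use of the schedules above; the timestep counts and endpoint values are arbitrary.

from baselines.common.schedules import LinearSchedule, PiecewiseSchedule

eps = LinearSchedule(schedule_timesteps=10000, initial_p=1.0, final_p=0.02)
print(eps.value(0))       # 1.0
print(eps.value(5000))    # 0.51, halfway through the anneal
print(eps.value(20000))   # 0.02, clamped after schedule_timesteps

lr = PiecewiseSchedule([(0, 1e-3), (10000, 5e-4), (20000, 1e-4)], outside_value=1e-4)
print(lr.value(15000))    # 3e-4, linearly interpolated between the two endpoints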
baselines
baselines-master/baselines/common/atari_wrappers.py
import numpy as np import os os.environ.setdefault('PATH', '') from collections import deque import gym from gym import spaces import cv2 cv2.ocl.setUseOpenCL(False) from .wrappers import TimeLimit class NoopResetEnv(gym.Wrapper): def __init__(self, env, noop_max=30): """Sample initial states by taking random number of no-ops on reset. No-op is assumed to be action 0. """ gym.Wrapper.__init__(self, env) self.noop_max = noop_max self.override_num_noops = None self.noop_action = 0 assert env.unwrapped.get_action_meanings()[0] == 'NOOP' def reset(self, **kwargs): """ Do no-op action for a number of steps in [1, noop_max].""" self.env.reset(**kwargs) if self.override_num_noops is not None: noops = self.override_num_noops else: noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101 assert noops > 0 obs = None for _ in range(noops): obs, _, done, _ = self.env.step(self.noop_action) if done: obs = self.env.reset(**kwargs) return obs def step(self, ac): return self.env.step(ac) class FireResetEnv(gym.Wrapper): def __init__(self, env): """Take action on reset for environments that are fixed until firing.""" gym.Wrapper.__init__(self, env) assert env.unwrapped.get_action_meanings()[1] == 'FIRE' assert len(env.unwrapped.get_action_meanings()) >= 3 def reset(self, **kwargs): self.env.reset(**kwargs) obs, _, done, _ = self.env.step(1) if done: self.env.reset(**kwargs) obs, _, done, _ = self.env.step(2) if done: self.env.reset(**kwargs) return obs def step(self, ac): return self.env.step(ac) class EpisodicLifeEnv(gym.Wrapper): def __init__(self, env): """Make end-of-life == end-of-episode, but only reset on true game over. Done by DeepMind for the DQN and co. since it helps value estimation. """ gym.Wrapper.__init__(self, env) self.lives = 0 self.was_real_done = True def step(self, action): obs, reward, done, info = self.env.step(action) self.was_real_done = done # check current lives, make loss of life terminal, # then update lives to handle bonus lives lives = self.env.unwrapped.ale.lives() if lives < self.lives and lives > 0: # for Qbert sometimes we stay in lives == 0 condition for a few frames # so it's important to keep lives > 0, so that we only reset once # the environment advertises done. done = True self.lives = lives return obs, reward, done, info def reset(self, **kwargs): """Reset only when lives are exhausted. This way all states are still reachable even though lives are episodic, and the learner need not know about any of this behind-the-scenes. 
""" if self.was_real_done: obs = self.env.reset(**kwargs) else: # no-op step to advance from terminal/lost life state obs, _, _, _ = self.env.step(0) self.lives = self.env.unwrapped.ale.lives() return obs class MaxAndSkipEnv(gym.Wrapper): def __init__(self, env, skip=4): """Return only every `skip`-th frame""" gym.Wrapper.__init__(self, env) # most recent raw observations (for max pooling across time steps) self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8) self._skip = skip def step(self, action): """Repeat action, sum reward, and max over last observations.""" total_reward = 0.0 done = None for i in range(self._skip): obs, reward, done, info = self.env.step(action) if i == self._skip - 2: self._obs_buffer[0] = obs if i == self._skip - 1: self._obs_buffer[1] = obs total_reward += reward if done: break # Note that the observation on the done=True frame # doesn't matter max_frame = self._obs_buffer.max(axis=0) return max_frame, total_reward, done, info def reset(self, **kwargs): return self.env.reset(**kwargs) class ClipRewardEnv(gym.RewardWrapper): def __init__(self, env): gym.RewardWrapper.__init__(self, env) def reward(self, reward): """Bin reward to {+1, 0, -1} by its sign.""" return np.sign(reward) class WarpFrame(gym.ObservationWrapper): def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None): """ Warp frames to 84x84 as done in the Nature paper and later work. If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which observation should be warped. """ super().__init__(env) self._width = width self._height = height self._grayscale = grayscale self._key = dict_space_key if self._grayscale: num_colors = 1 else: num_colors = 3 new_space = gym.spaces.Box( low=0, high=255, shape=(self._height, self._width, num_colors), dtype=np.uint8, ) if self._key is None: original_space = self.observation_space self.observation_space = new_space else: original_space = self.observation_space.spaces[self._key] self.observation_space.spaces[self._key] = new_space assert original_space.dtype == np.uint8 and len(original_space.shape) == 3 def observation(self, obs): if self._key is None: frame = obs else: frame = obs[self._key] if self._grayscale: frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) frame = cv2.resize( frame, (self._width, self._height), interpolation=cv2.INTER_AREA ) if self._grayscale: frame = np.expand_dims(frame, -1) if self._key is None: obs = frame else: obs = obs.copy() obs[self._key] = frame return obs class FrameStack(gym.Wrapper): def __init__(self, env, k): """Stack k last frames. Returns lazy array, which is much more memory efficient. 
See Also -------- baselines.common.atari_wrappers.LazyFrames """ gym.Wrapper.__init__(self, env) self.k = k self.frames = deque([], maxlen=k) shp = env.observation_space.shape self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)), dtype=env.observation_space.dtype) def reset(self): ob = self.env.reset() for _ in range(self.k): self.frames.append(ob) return self._get_ob() def step(self, action): ob, reward, done, info = self.env.step(action) self.frames.append(ob) return self._get_ob(), reward, done, info def _get_ob(self): assert len(self.frames) == self.k return LazyFrames(list(self.frames)) class ScaledFloatFrame(gym.ObservationWrapper): def __init__(self, env): gym.ObservationWrapper.__init__(self, env) self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32) def observation(self, observation): # careful! This undoes the memory optimization, use # with smaller replay buffers only. return np.array(observation).astype(np.float32) / 255.0 class LazyFrames(object): def __init__(self, frames): """This object ensures that common frames between the observations are only stored once. It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay buffers. This object should only be converted to numpy array before being passed to the model. You'd not believe how complex the previous solution was.""" self._frames = frames self._out = None def _force(self): if self._out is None: self._out = np.concatenate(self._frames, axis=-1) self._frames = None return self._out def __array__(self, dtype=None): out = self._force() if dtype is not None: out = out.astype(dtype) return out def __len__(self): return len(self._force()) def __getitem__(self, i): return self._force()[i] def count(self): frames = self._force() return frames.shape[frames.ndim - 1] def frame(self, i): return self._force()[..., i] def make_atari(env_id, max_episode_steps=None): env = gym.make(env_id) assert 'NoFrameskip' in env.spec.id env = NoopResetEnv(env, noop_max=30) env = MaxAndSkipEnv(env, skip=4) if max_episode_steps is not None: env = TimeLimit(env, max_episode_steps=max_episode_steps) return env def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False): """Configure environment for DeepMind-style Atari. """ if episode_life: env = EpisodicLifeEnv(env) if 'FIRE' in env.unwrapped.get_action_meanings(): env = FireResetEnv(env) env = WarpFrame(env) if scale: env = ScaledFloatFrame(env) if clip_rewards: env = ClipRewardEnv(env) if frame_stack: env = FrameStack(env, 4) return env
9,686
32.28866
130
py
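An illustrative use of the wrappers above to build a DeepMind-style Atari environment; it assumes the Atari extras for gym are installed, and the environment id is an arbitrary choice.

from baselines.common.atari_wrappers import make_atari, wrap_deepmind

env = make_atari('BreakoutNoFrameskip-v4')              # a NoFrameskip variant is required
env = wrap_deepmind(env, frame_stack=True, scale=False)
obs = env.reset()                                       # LazyFrames holding the stacked frames
print(obs.count(), env.observation_space.shape)         # 4 (84, 84, 4): four stacked grayscale frames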
baselines
baselines-master/baselines/common/mpi_running_mean_std.py
try: from mpi4py import MPI except ImportError: MPI = None import tensorflow as tf, baselines.common.tf_util as U, numpy as np class RunningMeanStd(object): # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm def __init__(self, epsilon=1e-2, shape=()): self._sum = tf.get_variable( dtype=tf.float64, shape=shape, initializer=tf.constant_initializer(0.0), name="runningsum", trainable=False) self._sumsq = tf.get_variable( dtype=tf.float64, shape=shape, initializer=tf.constant_initializer(epsilon), name="runningsumsq", trainable=False) self._count = tf.get_variable( dtype=tf.float64, shape=(), initializer=tf.constant_initializer(epsilon), name="count", trainable=False) self.shape = shape self.mean = tf.to_float(self._sum / self._count) self.std = tf.sqrt( tf.maximum( tf.to_float(self._sumsq / self._count) - tf.square(self.mean) , 1e-2 )) newsum = tf.placeholder(shape=self.shape, dtype=tf.float64, name='sum') newsumsq = tf.placeholder(shape=self.shape, dtype=tf.float64, name='var') newcount = tf.placeholder(shape=[], dtype=tf.float64, name='count') self.incfiltparams = U.function([newsum, newsumsq, newcount], [], updates=[tf.assign_add(self._sum, newsum), tf.assign_add(self._sumsq, newsumsq), tf.assign_add(self._count, newcount)]) def update(self, x): x = x.astype('float64') n = int(np.prod(self.shape)) totalvec = np.zeros(n*2+1, 'float64') addvec = np.concatenate([x.sum(axis=0).ravel(), np.square(x).sum(axis=0).ravel(), np.array([len(x)],dtype='float64')]) if MPI is not None: MPI.COMM_WORLD.Allreduce(addvec, totalvec, op=MPI.SUM) self.incfiltparams(totalvec[0:n].reshape(self.shape), totalvec[n:2*n].reshape(self.shape), totalvec[2*n]) @U.in_session def test_runningmeanstd(): for (x1, x2, x3) in [ (np.random.randn(3), np.random.randn(4), np.random.randn(5)), (np.random.randn(3,2), np.random.randn(4,2), np.random.randn(5,2)), ]: rms = RunningMeanStd(epsilon=0.0, shape=x1.shape[1:]) U.initialize() x = np.concatenate([x1, x2, x3], axis=0) ms1 = [x.mean(axis=0), x.std(axis=0)] rms.update(x1) rms.update(x2) rms.update(x3) ms2 = [rms.mean.eval(), rms.std.eval()] assert np.allclose(ms1, ms2) @U.in_session def test_dist(): np.random.seed(0) p1,p2,p3=(np.random.randn(3,1), np.random.randn(4,1), np.random.randn(5,1)) q1,q2,q3=(np.random.randn(6,1), np.random.randn(7,1), np.random.randn(8,1)) # p1,p2,p3=(np.random.randn(3), np.random.randn(4), np.random.randn(5)) # q1,q2,q3=(np.random.randn(6), np.random.randn(7), np.random.randn(8)) comm = MPI.COMM_WORLD assert comm.Get_size()==2 if comm.Get_rank()==0: x1,x2,x3 = p1,p2,p3 elif comm.Get_rank()==1: x1,x2,x3 = q1,q2,q3 else: assert False rms = RunningMeanStd(epsilon=0.0, shape=(1,)) U.initialize() rms.update(x1) rms.update(x2) rms.update(x3) bigvec = np.concatenate([p1,p2,p3,q1,q2,q3]) def checkallclose(x,y): print(x,y) return np.allclose(x,y) assert checkallclose( bigvec.mean(axis=0), rms.mean.eval(), ) assert checkallclose( bigvec.std(axis=0), rms.std.eval(), ) if __name__ == "__main__": # Run with mpirun -np 2 python <filename> test_dist()
3,706
31.80531
126
py
baselines
baselines-master/baselines/common/test_mpi_util.py
from baselines.common import mpi_util
from baselines import logger
from baselines.common.tests.test_with_mpi import with_mpi

try:
    from mpi4py import MPI
except ImportError:
    MPI = None


@with_mpi()
def test_mpi_weighted_mean():
    comm = MPI.COMM_WORLD
    with logger.scoped_configure(comm=comm):
        if comm.rank == 0:
            name2valcount = {'a': (10, 2), 'b': (20, 3)}
        elif comm.rank == 1:
            name2valcount = {'a': (19, 1), 'c': (42, 3)}
        else:
            raise NotImplementedError

        d = mpi_util.mpi_weighted_mean(comm, name2valcount)
        correctval = {'a': (10 * 2 + 19) / 3.0, 'b': 20, 'c': 42}
        if comm.rank == 0:
            assert d == correctval, '{} != {}'.format(d, correctval)

        for name, (val, count) in name2valcount.items():
            for _ in range(count):
                logger.logkv_mean(name, val)
        d2 = logger.dumpkvs()
        if comm.rank == 0:
            assert d2 == correctval
986
31.9
68
py
baselines
baselines-master/baselines/common/misc_util.py
import gym import numpy as np import os import pickle import random import tempfile import zipfile def zipsame(*seqs): L = len(seqs[0]) assert all(len(seq) == L for seq in seqs[1:]) return zip(*seqs) class EzPickle(object): """Objects that are pickled and unpickled via their constructor arguments. Example usage: class Dog(Animal, EzPickle): def __init__(self, furcolor, tailkind="bushy"): Animal.__init__() EzPickle.__init__(furcolor, tailkind) ... When this object is unpickled, a new Dog will be constructed by passing the provided furcolor and tailkind into the constructor. However, philosophers are still not sure whether it is still the same dog. This is generally needed only for environments which wrap C/C++ code, such as MuJoCo and Atari. """ def __init__(self, *args, **kwargs): self._ezpickle_args = args self._ezpickle_kwargs = kwargs def __getstate__(self): return {"_ezpickle_args": self._ezpickle_args, "_ezpickle_kwargs": self._ezpickle_kwargs} def __setstate__(self, d): out = type(self)(*d["_ezpickle_args"], **d["_ezpickle_kwargs"]) self.__dict__.update(out.__dict__) def set_global_seeds(i): try: import MPI rank = MPI.COMM_WORLD.Get_rank() except ImportError: rank = 0 myseed = i + 1000 * rank if i is not None else None try: import tensorflow as tf tf.set_random_seed(myseed) except ImportError: pass np.random.seed(myseed) random.seed(myseed) def pretty_eta(seconds_left): """Print the number of seconds in human readable format. Examples: 2 days 2 hours and 37 minutes less than a minute Paramters --------- seconds_left: int Number of seconds to be converted to the ETA Returns ------- eta: str String representing the pretty ETA. """ minutes_left = seconds_left // 60 seconds_left %= 60 hours_left = minutes_left // 60 minutes_left %= 60 days_left = hours_left // 24 hours_left %= 24 def helper(cnt, name): return "{} {}{}".format(str(cnt), name, ('s' if cnt > 1 else '')) if days_left > 0: msg = helper(days_left, 'day') if hours_left > 0: msg += ' and ' + helper(hours_left, 'hour') return msg if hours_left > 0: msg = helper(hours_left, 'hour') if minutes_left > 0: msg += ' and ' + helper(minutes_left, 'minute') return msg if minutes_left > 0: return helper(minutes_left, 'minute') return 'less than a minute' class RunningAvg(object): def __init__(self, gamma, init_value=None): """Keep a running estimate of a quantity. This is a bit like mean but more sensitive to recent changes. Parameters ---------- gamma: float Must be between 0 and 1, where 0 is the most sensitive to recent changes. init_value: float or None Initial value of the estimate. If None, it will be set on the first update. """ self._value = init_value self._gamma = gamma def update(self, new_val): """Update the estimate. Parameters ---------- new_val: float new observated value of estimated quantity. """ if self._value is None: self._value = new_val else: self._value = self._gamma * self._value + (1.0 - self._gamma) * new_val def __float__(self): """Get the current estimate""" return self._value def boolean_flag(parser, name, default=False, help=None): """Add a boolean flag to argparse parser. 
Parameters ---------- parser: argparse.Parser parser to add the flag to name: str --<name> will enable the flag, while --no-<name> will disable it default: bool or None default value of the flag help: str help string for the flag """ dest = name.replace('-', '_') parser.add_argument("--" + name, action="store_true", default=default, dest=dest, help=help) parser.add_argument("--no-" + name, action="store_false", dest=dest) def get_wrapper_by_name(env, classname): """Given an a gym environment possibly wrapped multiple times, returns a wrapper of class named classname or raises ValueError if no such wrapper was applied Parameters ---------- env: gym.Env of gym.Wrapper gym environment classname: str name of the wrapper Returns ------- wrapper: gym.Wrapper wrapper named classname """ currentenv = env while True: if classname == currentenv.class_name(): return currentenv elif isinstance(currentenv, gym.Wrapper): currentenv = currentenv.env else: raise ValueError("Couldn't find wrapper named %s" % classname) def relatively_safe_pickle_dump(obj, path, compression=False): """This is just like regular pickle dump, except from the fact that failure cases are different: - It's never possible that we end up with a pickle in corrupted state. - If a there was a different file at the path, that file will remain unchanged in the even of failure (provided that filesystem rename is atomic). - it is sometimes possible that we end up with useless temp file which needs to be deleted manually (it will be removed automatically on the next function call) The indended use case is periodic checkpoints of experiment state, such that we never corrupt previous checkpoints if the current one fails. Parameters ---------- obj: object object to pickle path: str path to the output file compression: bool if true pickle will be compressed """ temp_storage = path + ".relatively_safe" if compression: # Using gzip here would be simpler, but the size is limited to 2GB with tempfile.NamedTemporaryFile() as uncompressed_file: pickle.dump(obj, uncompressed_file) uncompressed_file.file.flush() with zipfile.ZipFile(temp_storage, "w", compression=zipfile.ZIP_DEFLATED) as myzip: myzip.write(uncompressed_file.name, "data") else: with open(temp_storage, "wb") as f: pickle.dump(obj, f) os.rename(temp_storage, path) def pickle_load(path, compression=False): """Unpickle a possible compressed pickle. Parameters ---------- path: str path to the output file compression: bool if true assumes that pickle was compressed when created and attempts decompression. Returns ------- obj: object the unpickled object """ if compression: with zipfile.ZipFile(path, "r", compression=zipfile.ZIP_DEFLATED) as myzip: with myzip.open("data") as f: return pickle.load(f) else: with open(path, "rb") as f: return pickle.load(f)
7,166
28.372951
97
py
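A short, hedged usage sketch for the helpers defined above; it assumes they live in baselines.common.misc_util (the usual location for this file in the repo) and the numbers are purely illustrative.

# Illustrative only: exercises RunningAvg, pretty_eta and set_global_seeds.
from baselines.common.misc_util import RunningAvg, pretty_eta, set_global_seeds

set_global_seeds(0)                      # seeds numpy/random (and TF if installed)

avg = RunningAvg(gamma=0.9)
for reward in [1.0, 0.0, 2.0, 1.5]:
    avg.update(reward)                   # exponentially weighted running estimate
print(float(avg))                        # current estimate

print(pretty_eta(90))                    # "1 minute"
print(pretty_eta(3700))                  # "1 hour and 1 minute"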
baselines
baselines-master/baselines/common/mpi_fork.py
import os, subprocess, sys

def mpi_fork(n, bind_to_core=False):
    """Re-launches the current script with workers

    Returns "parent" for original parent, "child" for MPI children
    """
    if n <= 1:
        return "child"
    if os.getenv("IN_MPI") is None:
        env = os.environ.copy()
        env.update(
            MKL_NUM_THREADS="1",
            OMP_NUM_THREADS="1",
            IN_MPI="1"
        )
        args = ["mpirun", "-np", str(n)]
        if bind_to_core:
            args += ["-bind-to", "core"]
        args += [sys.executable] + sys.argv
        subprocess.check_call(args, env=env)
        return "parent"
    else:
        return "child"
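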
667
26.833333
66
py
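A hedged sketch of the usual mpi_fork launch pattern: the parent re-launches the script under mpirun and exits, while the MPI children fall through to the real work. The train function here is a hypothetical stand-in for your own entry point.

import sys
from baselines.common.mpi_fork import mpi_fork

def train():
    pass  # hypothetical placeholder for the real training loop

if __name__ == '__main__':
    if mpi_fork(4) == "parent":   # re-launch this script with 4 MPI workers
        sys.exit()                # original process exits once the workers are done
    train()                       # each MPI child ends up here and does the work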
baselines
baselines-master/baselines/common/dataset.py
import numpy as np

class Dataset(object):
    def __init__(self, data_map, deterministic=False, shuffle=True):
        self.data_map = data_map
        self.deterministic = deterministic
        self.enable_shuffle = shuffle
        self.n = next(iter(data_map.values())).shape[0]
        self._next_id = 0
        self.shuffle()

    def shuffle(self):
        if self.deterministic:
            return
        perm = np.arange(self.n)
        np.random.shuffle(perm)

        for key in self.data_map:
            self.data_map[key] = self.data_map[key][perm]

        self._next_id = 0

    def next_batch(self, batch_size):
        if self._next_id >= self.n and self.enable_shuffle:
            self.shuffle()

        cur_id = self._next_id
        cur_batch_size = min(batch_size, self.n - self._next_id)
        self._next_id += cur_batch_size

        data_map = dict()
        for key in self.data_map:
            data_map[key] = self.data_map[key][cur_id:cur_id+cur_batch_size]
        return data_map

    def iterate_once(self, batch_size):
        if self.enable_shuffle:
            self.shuffle()

        while self._next_id <= self.n - batch_size:
            yield self.next_batch(batch_size)
        self._next_id = 0

    def subset(self, num_elements, deterministic=True):
        data_map = dict()
        for key in self.data_map:
            data_map[key] = self.data_map[key][:num_elements]
        return Dataset(data_map, deterministic)


def iterbatches(arrays, *, num_batches=None, batch_size=None, shuffle=True, include_final_partial_batch=True):
    assert (num_batches is None) != (batch_size is None), 'Provide num_batches or batch_size, but not both'
    arrays = tuple(map(np.asarray, arrays))
    n = arrays[0].shape[0]
    assert all(a.shape[0] == n for a in arrays[1:])
    inds = np.arange(n)
    if shuffle:
        np.random.shuffle(inds)
    sections = np.arange(0, n, batch_size)[1:] if num_batches is None else num_batches
    for batch_inds in np.array_split(inds, sections):
        if include_final_partial_batch or len(batch_inds) == batch_size:
            yield tuple(a[batch_inds] for a in arrays)
2,132
33.967213
110
py
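A minimal sketch of iterbatches from the file above, using arbitrary toy arrays.

import numpy as np
from baselines.common.dataset import iterbatches

xs = np.arange(10)
ys = np.arange(10) * 2

for batch_x, batch_y in iterbatches((xs, ys), batch_size=4, shuffle=False):
    # yields batches of 4, 4 and 2 elements (the final partial batch is included)
    print(batch_x, batch_y)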
baselines
baselines-master/baselines/common/math_util.py
import numpy as np
import scipy.signal


def discount(x, gamma):
    """
    computes discounted sums along 0th dimension of x.

    inputs
    ------
    x: ndarray
    gamma: float

    outputs
    -------
    y: ndarray with same shape as x, satisfying

        y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... + gamma^k x[t+k],
                where k = len(x) - t - 1
    """
    assert x.ndim >= 1
    return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]


def explained_variance(ypred, y):
    """
    Computes fraction of variance that ypred explains about y.
    Returns 1 - Var[y-ypred] / Var[y]

    interpretation:
        ev=0  =>  might as well have predicted zero
        ev=1  =>  perfect prediction
        ev<0  =>  worse than just predicting zero
    """
    assert y.ndim == 1 and ypred.ndim == 1
    vary = np.var(y)
    return np.nan if vary == 0 else 1 - np.var(y - ypred) / vary


def explained_variance_2d(ypred, y):
    assert y.ndim == 2 and ypred.ndim == 2
    vary = np.var(y, axis=0)
    out = 1 - np.var(y - ypred) / vary
    out[vary < 1e-10] = 0
    return out


def ncc(ypred, y):
    return np.corrcoef(ypred, y)[1, 0]


def flatten_arrays(arrs):
    return np.concatenate([arr.flat for arr in arrs])


def unflatten_vector(vec, shapes):
    i = 0
    arrs = []
    for shape in shapes:
        size = np.prod(shape)
        arr = vec[i:i + size].reshape(shape)
        arrs.append(arr)
        i += size
    return arrs


def discount_with_boundaries(X, New, gamma):
    """
    X: 2d array of floats, time x features
    New: 2d array of bools, indicating when a new episode has started
    """
    Y = np.zeros_like(X)
    T = X.shape[0]
    Y[T-1] = X[T-1]
    for t in range(T-2, -1, -1):
        Y[t] = X[t] + gamma * Y[t+1] * (1 - New[t+1])
    return Y


def test_discount_with_boundaries():
    gamma = 0.9
    x = np.array([1.0, 2.0, 3.0, 4.0], 'float32')
    starts = [1.0, 0.0, 0.0, 1.0]
    y = discount_with_boundaries(x, starts, gamma)
    assert np.allclose(y, [
        1 + gamma * 2 + gamma**2 * 3,
        2 + gamma * 3,
        3,
        4
    ])
2,094
23.360465
75
py
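A small worked example for the discounting helpers above, with illustrative numbers only.

import numpy as np
from baselines.common.math_util import discount, discount_with_boundaries

gamma = 0.9
rewards = np.array([1.0, 2.0, 3.0], dtype='float32')

# discount: y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ...
returns = discount(rewards, gamma)
np.testing.assert_allclose(returns, [1 + 0.9*2 + 0.81*3, 2 + 0.9*3, 3.0], rtol=1e-5)

# discount_with_boundaries stops the accumulation where a new episode starts
news = np.array([1.0, 0.0, 1.0], dtype='float32')   # episode boundary at t=2
returns2 = discount_with_boundaries(rewards, news, gamma)
np.testing.assert_allclose(returns2, [1 + 0.9*2, 2.0, 3.0], rtol=1e-5)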
baselines
baselines-master/baselines/common/tf_util.py
import numpy as np import tensorflow as tf # pylint: ignore-module import copy import os import functools import collections import multiprocessing def switch(condition, then_expression, else_expression): """Switches between two operations depending on a scalar value (int or bool). Note that both `then_expression` and `else_expression` should be symbolic tensors of the *same shape*. # Arguments condition: scalar tensor. then_expression: TensorFlow operation. else_expression: TensorFlow operation. """ x_shape = copy.copy(then_expression.get_shape()) x = tf.cond(tf.cast(condition, 'bool'), lambda: then_expression, lambda: else_expression) x.set_shape(x_shape) return x # ================================================================ # Extras # ================================================================ def lrelu(x, leak=0.2): f1 = 0.5 * (1 + leak) f2 = 0.5 * (1 - leak) return f1 * x + f2 * abs(x) # ================================================================ # Mathematical utils # ================================================================ def huber_loss(x, delta=1.0): """Reference: https://en.wikipedia.org/wiki/Huber_loss""" return tf.where( tf.abs(x) < delta, tf.square(x) * 0.5, delta * (tf.abs(x) - 0.5 * delta) ) # ================================================================ # Global session # ================================================================ def get_session(config=None): """Get default session or create one with a given config""" sess = tf.get_default_session() if sess is None: sess = make_session(config=config, make_default=True) return sess def make_session(config=None, num_cpu=None, make_default=False, graph=None): """Returns a session that will use <num_cpu> CPU's only""" if num_cpu is None: num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count())) if config is None: config = tf.ConfigProto( allow_soft_placement=True, inter_op_parallelism_threads=num_cpu, intra_op_parallelism_threads=num_cpu) config.gpu_options.allow_growth = True if make_default: return tf.InteractiveSession(config=config, graph=graph) else: return tf.Session(config=config, graph=graph) def single_threaded_session(): """Returns a session which will only use a single CPU""" return make_session(num_cpu=1) def in_session(f): @functools.wraps(f) def newfunc(*args, **kwargs): with tf.Session(): f(*args, **kwargs) return newfunc ALREADY_INITIALIZED = set() def initialize(): """Initialize all the uninitialized variables in the global scope.""" new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED get_session().run(tf.variables_initializer(new_variables)) ALREADY_INITIALIZED.update(new_variables) # ================================================================ # Model components # ================================================================ def normc_initializer(std=1.0, axis=0): def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613 out = np.random.randn(*shape).astype(dtype.as_numpy_dtype) out *= std / np.sqrt(np.square(out).sum(axis=axis, keepdims=True)) return tf.constant(out) return _initializer def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None, summary_tag=None): with tf.variable_scope(name): stride_shape = [1, stride[0], stride[1], 1] filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters] # there are "num input feature maps * filter height * filter width" # inputs to each hidden unit fan_in = intprod(filter_shape[:3]) # each unit in 
the lower layer receives a gradient from: # "num output feature maps * filter height * filter width" / # pooling size fan_out = intprod(filter_shape[:2]) * num_filters # initialize weights with random weights w_bound = np.sqrt(6. / (fan_in + fan_out)) w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound), collections=collections) b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer(), collections=collections) if summary_tag is not None: tf.summary.image(summary_tag, tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]), [2, 0, 1, 3]), max_images=10) return tf.nn.conv2d(x, w, stride_shape, pad) + b # ================================================================ # Theano-like Function # ================================================================ def function(inputs, outputs, updates=None, givens=None): """Just like Theano function. Take a bunch of tensorflow placeholders and expressions computed based on those placeholders and produces f(inputs) -> outputs. Function f takes values to be fed to the input's placeholders and produces the values of the expressions in outputs. Input values can be passed in the same order as inputs or can be provided as kwargs based on placeholder name (passed to constructor or accessible via placeholder.op.name). Example: x = tf.placeholder(tf.int32, (), name="x") y = tf.placeholder(tf.int32, (), name="y") z = 3 * x + 2 * y lin = function([x, y], z, givens={y: 0}) with single_threaded_session(): initialize() assert lin(2) == 6 assert lin(x=3) == 9 assert lin(2, 2) == 10 assert lin(x=2, y=3) == 12 Parameters ---------- inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method] list of input arguments outputs: [tf.Variable] or tf.Variable list of outputs or a single output to be returned from function. Returned value will also have the same shape. updates: [tf.Operation] or tf.Operation list of update functions or single update function that will be run whenever the function is called. The return is ignored. """ if isinstance(outputs, list): return _Function(inputs, outputs, updates, givens=givens) elif isinstance(outputs, (dict, collections.OrderedDict)): f = _Function(inputs, outputs.values(), updates, givens=givens) return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs))) else: f = _Function(inputs, [outputs], updates, givens=givens) return lambda *args, **kwargs: f(*args, **kwargs)[0] class _Function(object): def __init__(self, inputs, outputs, updates, givens): for inpt in inputs: if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0): assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method" self.inputs = inputs self.input_names = {inp.name.split("/")[-1].split(":")[0]: inp for inp in inputs} updates = updates or [] self.update_group = tf.group(*updates) self.outputs_update = list(outputs) + [self.update_group] self.givens = {} if givens is None else givens def _feed_input(self, feed_dict, inpt, value): if hasattr(inpt, 'make_feed_dict'): feed_dict.update(inpt.make_feed_dict(value)) else: feed_dict[inpt] = adjust_shape(inpt, value) def __call__(self, *args, **kwargs): assert len(args) + len(kwargs) <= len(self.inputs), "Too many arguments provided" feed_dict = {} # Update feed dict with givens. 
for inpt in self.givens: feed_dict[inpt] = adjust_shape(inpt, feed_dict.get(inpt, self.givens[inpt])) # Update the args for inpt, value in zip(self.inputs, args): self._feed_input(feed_dict, inpt, value) for inpt_name, value in kwargs.items(): self._feed_input(feed_dict, self.input_names[inpt_name], value) results = get_session().run(self.outputs_update, feed_dict=feed_dict)[:-1] return results # ================================================================ # Flat vectors # ================================================================ def var_shape(x): out = x.get_shape().as_list() assert all(isinstance(a, int) for a in out), \ "shape function assumes that shape is fully known" return out def numel(x): return intprod(var_shape(x)) def intprod(x): return int(np.prod(x)) def flatgrad(loss, var_list, clip_norm=None): grads = tf.gradients(loss, var_list) if clip_norm is not None: grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads] return tf.concat(axis=0, values=[ tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)]) for (v, grad) in zip(var_list, grads) ]) class SetFromFlat(object): def __init__(self, var_list, dtype=tf.float32): assigns = [] shapes = list(map(var_shape, var_list)) total_size = np.sum([intprod(shape) for shape in shapes]) self.theta = theta = tf.placeholder(dtype, [total_size]) start = 0 assigns = [] for (shape, v) in zip(shapes, var_list): size = intprod(shape) assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape))) start += size self.op = tf.group(*assigns) def __call__(self, theta): tf.get_default_session().run(self.op, feed_dict={self.theta: theta}) class GetFlat(object): def __init__(self, var_list): self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list]) def __call__(self): return tf.get_default_session().run(self.op) def flattenallbut0(x): return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])]) # ============================================================= # TF placeholders management # ============================================================ _PLACEHOLDER_CACHE = {} # name -> (placeholder, dtype, shape) def get_placeholder(name, dtype, shape): if name in _PLACEHOLDER_CACHE: out, dtype1, shape1 = _PLACEHOLDER_CACHE[name] if out.graph == tf.get_default_graph(): assert dtype1 == dtype and shape1 == shape, \ 'Placeholder with name {} has already been registered and has shape {}, different from requested {}'.format(name, shape1, shape) return out out = tf.placeholder(dtype=dtype, shape=shape, name=name) _PLACEHOLDER_CACHE[name] = (out, dtype, shape) return out def get_placeholder_cached(name): return _PLACEHOLDER_CACHE[name][0] # ================================================================ # Diagnostics # ================================================================ def display_var_info(vars): from baselines import logger count_params = 0 for v in vars: name = v.name if "/Adam" in name or "beta1_power" in name or "beta2_power" in name: continue v_params = np.prod(v.shape.as_list()) count_params += v_params if "/b:" in name or "/bias" in name: continue # Wx+b, bias is not interesting to look at => count params, but not print logger.info(" %s%s %i params %s" % (name, " "*(55-len(name)), v_params, str(v.shape))) logger.info("Total model parameters: %0.2f million" % (count_params*1e-6)) def get_available_gpus(session_config=None): # based on recipe from https://stackoverflow.com/a/38580201 # Unless we allocate a session here, subsequent attempts to create one # will 
ignore our custom config (in particular, allow_growth=True will have # no effect). if session_config is None: session_config = get_session()._config from tensorflow.python.client import device_lib local_device_protos = device_lib.list_local_devices(session_config) return [x.name for x in local_device_protos if x.device_type == 'GPU'] # ================================================================ # Saving variables # ================================================================ def load_state(fname, sess=None): from baselines import logger logger.warn('load_state method is deprecated, please use load_variables instead') sess = sess or get_session() saver = tf.train.Saver() saver.restore(tf.get_default_session(), fname) def save_state(fname, sess=None): from baselines import logger logger.warn('save_state method is deprecated, please use save_variables instead') sess = sess or get_session() dirname = os.path.dirname(fname) if any(dirname): os.makedirs(dirname, exist_ok=True) saver = tf.train.Saver() saver.save(tf.get_default_session(), fname) # The methods above and below are clearly doing the same thing, and in a rather similar way # TODO: ensure there is no subtle differences and remove one def save_variables(save_path, variables=None, sess=None): import joblib sess = sess or get_session() variables = variables or tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) ps = sess.run(variables) save_dict = {v.name: value for v, value in zip(variables, ps)} dirname = os.path.dirname(save_path) if any(dirname): os.makedirs(dirname, exist_ok=True) joblib.dump(save_dict, save_path) def load_variables(load_path, variables=None, sess=None): import joblib sess = sess or get_session() variables = variables or tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) loaded_params = joblib.load(os.path.expanduser(load_path)) restores = [] if isinstance(loaded_params, list): assert len(loaded_params) == len(variables), 'number of variables loaded mismatches len(variables)' for d, v in zip(loaded_params, variables): restores.append(v.assign(d)) else: for v in variables: restores.append(v.assign(loaded_params[v.name])) sess.run(restores) # ================================================================ # Shape adjustment for feeding into tf placeholders # ================================================================ def adjust_shape(placeholder, data): ''' adjust shape of the data to the shape of the placeholder if possible. If shape is incompatible, AssertionError is thrown Parameters: placeholder tensorflow input placeholder data input data to be (potentially) reshaped to be fed into placeholder Returns: reshaped data ''' if not isinstance(data, np.ndarray) and not isinstance(data, list): return data if isinstance(data, list): data = np.array(data) placeholder_shape = [x or -1 for x in placeholder.shape.as_list()] assert _check_shape(placeholder_shape, data.shape), \ 'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, placeholder_shape) return np.reshape(data, placeholder_shape) def _check_shape(placeholder_shape, data_shape): ''' check if two shapes are compatible (i.e. 
differ only by dimensions of size 1, or by the batch dimension)''' return True squeezed_placeholder_shape = _squeeze_shape(placeholder_shape) squeezed_data_shape = _squeeze_shape(data_shape) for i, s_data in enumerate(squeezed_data_shape): s_placeholder = squeezed_placeholder_shape[i] if s_placeholder != -1 and s_data != s_placeholder: return False return True def _squeeze_shape(shape): return [x for x in shape if x != 1] # ================================================================ # Tensorboard interfacing # ================================================================ def launch_tensorboard_in_background(log_dir): ''' To log the Tensorflow graph when using rl-algs algorithms, you can run the following code in your main script: import threading, time def start_tensorboard(session): time.sleep(10) # Wait until graph is setup tb_path = osp.join(logger.get_dir(), 'tb') summary_writer = tf.summary.FileWriter(tb_path, graph=session.graph) summary_op = tf.summary.merge_all() launch_tensorboard_in_background(tb_path) session = tf.get_default_session() t = threading.Thread(target=start_tensorboard, args=([session])) t.start() ''' import subprocess subprocess.Popen(['tensorboard', '--logdir', log_dir])
16,969
37.220721
144
py
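For clarity, a NumPy restatement of the huber_loss formula defined above; this sketch is not part of the library, it just makes the piecewise definition easy to check without building a TF graph.

import numpy as np

def huber_loss_np(x, delta=1.0):
    # quadratic for |x| < delta, linear (with matched slope) beyond it
    return np.where(np.abs(x) < delta,
                    0.5 * np.square(x),
                    delta * (np.abs(x) - 0.5 * delta))

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
print(huber_loss_np(x))   # -> 1.5, 0.125, 0.0, 0.125, 1.5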
baselines
baselines-master/baselines/common/tile_images.py
import numpy as np


def tile_images(img_nhwc):
    """
    Tile N images into one big PxQ image
    (P,Q) are chosen to be as close as possible, and if N
    is square, then P=Q.

    input: img_nhwc, list or array of images, ndim=4 once turned into array
        n = batch index, h = height, w = width, c = channel
    returns:
        bigim_HWc, ndarray with ndim=3
    """
    img_nhwc = np.asarray(img_nhwc)
    N, h, w, c = img_nhwc.shape
    H = int(np.ceil(np.sqrt(N)))
    W = int(np.ceil(float(N)/H))
    img_nhwc = np.array(list(img_nhwc) + [img_nhwc[0]*0 for _ in range(N, H*W)])
    img_HWhwc = img_nhwc.reshape(H, W, h, w, c)
    img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4)
    img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c)
    return img_Hh_Ww_c
763
30.833333
80
py
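A quick sketch of what tile_images produces for a batch of frames; the random data and sizes are illustrative.

import numpy as np
from baselines.common.tile_images import tile_images

frames = np.random.randint(0, 255, size=(7, 84, 84, 3), dtype=np.uint8)
big_image = tile_images(frames)
print(big_image.shape)   # (252, 252, 3): 7 frames padded into a 3x3 grid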
baselines
baselines-master/baselines/common/running_mean_std.py
import tensorflow as tf import numpy as np from baselines.common.tf_util import get_session class RunningMeanStd(object): # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm def __init__(self, epsilon=1e-4, shape=()): self.mean = np.zeros(shape, 'float64') self.var = np.ones(shape, 'float64') self.count = epsilon def update(self, x): batch_mean = np.mean(x, axis=0) batch_var = np.var(x, axis=0) batch_count = x.shape[0] self.update_from_moments(batch_mean, batch_var, batch_count) def update_from_moments(self, batch_mean, batch_var, batch_count): self.mean, self.var, self.count = update_mean_var_count_from_moments( self.mean, self.var, self.count, batch_mean, batch_var, batch_count) def update_mean_var_count_from_moments(mean, var, count, batch_mean, batch_var, batch_count): delta = batch_mean - mean tot_count = count + batch_count new_mean = mean + delta * batch_count / tot_count m_a = var * count m_b = batch_var * batch_count M2 = m_a + m_b + np.square(delta) * count * batch_count / tot_count new_var = M2 / tot_count new_count = tot_count return new_mean, new_var, new_count class TfRunningMeanStd(object): # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm ''' TensorFlow variables-based implmentation of computing running mean and std Benefit of this implementation is that it can be saved / loaded together with the tensorflow model ''' def __init__(self, epsilon=1e-4, shape=(), scope=''): sess = get_session() self._new_mean = tf.placeholder(shape=shape, dtype=tf.float64) self._new_var = tf.placeholder(shape=shape, dtype=tf.float64) self._new_count = tf.placeholder(shape=(), dtype=tf.float64) with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): self._mean = tf.get_variable('mean', initializer=np.zeros(shape, 'float64'), dtype=tf.float64) self._var = tf.get_variable('std', initializer=np.ones(shape, 'float64'), dtype=tf.float64) self._count = tf.get_variable('count', initializer=np.full((), epsilon, 'float64'), dtype=tf.float64) self.update_ops = tf.group([ self._var.assign(self._new_var), self._mean.assign(self._new_mean), self._count.assign(self._new_count) ]) sess.run(tf.variables_initializer([self._mean, self._var, self._count])) self.sess = sess self._set_mean_var_count() def _set_mean_var_count(self): self.mean, self.var, self.count = self.sess.run([self._mean, self._var, self._count]) def update(self, x): batch_mean = np.mean(x, axis=0) batch_var = np.var(x, axis=0) batch_count = x.shape[0] new_mean, new_var, new_count = update_mean_var_count_from_moments(self.mean, self.var, self.count, batch_mean, batch_var, batch_count) self.sess.run(self.update_ops, feed_dict={ self._new_mean: new_mean, self._new_var: new_var, self._new_count: new_count }) self._set_mean_var_count() def test_runningmeanstd(): for (x1, x2, x3) in [ (np.random.randn(3), np.random.randn(4), np.random.randn(5)), (np.random.randn(3,2), np.random.randn(4,2), np.random.randn(5,2)), ]: rms = RunningMeanStd(epsilon=0.0, shape=x1.shape[1:]) x = np.concatenate([x1, x2, x3], axis=0) ms1 = [x.mean(axis=0), x.var(axis=0)] rms.update(x1) rms.update(x2) rms.update(x3) ms2 = [rms.mean, rms.var] np.testing.assert_allclose(ms1, ms2) def test_tf_runningmeanstd(): for (x1, x2, x3) in [ (np.random.randn(3), np.random.randn(4), np.random.randn(5)), (np.random.randn(3,2), np.random.randn(4,2), np.random.randn(5,2)), ]: rms = TfRunningMeanStd(epsilon=0.0, shape=x1.shape[1:], scope='running_mean_std' + str(np.random.randint(0, 128))) x = np.concatenate([x1, x2, x3], 
axis=0) ms1 = [x.mean(axis=0), x.var(axis=0)] rms.update(x1) rms.update(x2) rms.update(x3) ms2 = [rms.mean, rms.var] np.testing.assert_allclose(ms1, ms2) def profile_tf_runningmeanstd(): import time from baselines.common import tf_util tf_util.get_session( config=tf.ConfigProto( inter_op_parallelism_threads=1, intra_op_parallelism_threads=1, allow_soft_placement=True )) x = np.random.random((376,)) n_trials = 10000 rms = RunningMeanStd() tfrms = TfRunningMeanStd() tic1 = time.time() for _ in range(n_trials): rms.update(x) tic2 = time.time() for _ in range(n_trials): tfrms.update(x) tic3 = time.time() print('rms update time ({} trials): {} s'.format(n_trials, tic2 - tic1)) print('tfrms update time ({} trials): {} s'.format(n_trials, tic3 - tic2)) tic1 = time.time() for _ in range(n_trials): z1 = rms.mean tic2 = time.time() for _ in range(n_trials): z2 = tfrms.mean assert z1 == z2 tic3 = time.time() print('rms get mean time ({} trials): {} s'.format(n_trials, tic2 - tic1)) print('tfrms get mean time ({} trials): {} s'.format(n_trials, tic3 - tic2)) ''' options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) #pylint: disable=E1101 run_metadata = tf.RunMetadata() profile_opts = dict(options=options, run_metadata=run_metadata) from tensorflow.python.client import timeline fetched_timeline = timeline.Timeline(run_metadata.step_stats) #pylint: disable=E1101 chrome_trace = fetched_timeline.generate_chrome_trace_format() outfile = '/tmp/timeline.json' with open(outfile, 'wt') as f: f.write(chrome_trace) print('Successfully saved profile to {}. Exiting.'.format(outfile)) exit(0) ''' if __name__ == '__main__': profile_tf_runningmeanstd()
6,081
31.351064
142
py
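A sanity-check sketch for the parallel-moments update above: merging batch statistics with update_mean_var_count_from_moments should reproduce the population statistics (ddof=0) of the concatenated data.

import numpy as np
from baselines.common.running_mean_std import update_mean_var_count_from_moments

x1, x2 = np.random.randn(100, 3), np.random.randn(50, 3)

mean, var, count = x1.mean(axis=0), x1.var(axis=0), x1.shape[0]
mean, var, count = update_mean_var_count_from_moments(
    mean, var, count, x2.mean(axis=0), x2.var(axis=0), x2.shape[0])

x = np.concatenate([x1, x2], axis=0)
np.testing.assert_allclose(mean, x.mean(axis=0), atol=1e-8)
np.testing.assert_allclose(var, x.var(axis=0), atol=1e-8)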
baselines
baselines-master/baselines/common/retro_wrappers.py
from collections import deque import cv2 cv2.ocl.setUseOpenCL(False) from .atari_wrappers import WarpFrame, ClipRewardEnv, FrameStack, ScaledFloatFrame from .wrappers import TimeLimit import numpy as np import gym class StochasticFrameSkip(gym.Wrapper): def __init__(self, env, n, stickprob): gym.Wrapper.__init__(self, env) self.n = n self.stickprob = stickprob self.curac = None self.rng = np.random.RandomState() self.supports_want_render = hasattr(env, "supports_want_render") def reset(self, **kwargs): self.curac = None return self.env.reset(**kwargs) def step(self, ac): done = False totrew = 0 for i in range(self.n): # First step after reset, use action if self.curac is None: self.curac = ac # First substep, delay with probability=stickprob elif i==0: if self.rng.rand() > self.stickprob: self.curac = ac # Second substep, new action definitely kicks in elif i==1: self.curac = ac if self.supports_want_render and i<self.n-1: ob, rew, done, info = self.env.step(self.curac, want_render=False) else: ob, rew, done, info = self.env.step(self.curac) totrew += rew if done: break return ob, totrew, done, info def seed(self, s): self.rng.seed(s) class PartialFrameStack(gym.Wrapper): def __init__(self, env, k, channel=1): """ Stack one channel (channel keyword) from previous frames """ gym.Wrapper.__init__(self, env) shp = env.observation_space.shape self.channel = channel self.observation_space = gym.spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] + k - 1), dtype=env.observation_space.dtype) self.k = k self.frames = deque([], maxlen=k) shp = env.observation_space.shape def reset(self): ob = self.env.reset() assert ob.shape[2] > self.channel for _ in range(self.k): self.frames.append(ob) return self._get_ob() def step(self, ac): ob, reward, done, info = self.env.step(ac) self.frames.append(ob) return self._get_ob(), reward, done, info def _get_ob(self): assert len(self.frames) == self.k return np.concatenate([frame if i==self.k-1 else frame[:,:,self.channel:self.channel+1] for (i, frame) in enumerate(self.frames)], axis=2) class Downsample(gym.ObservationWrapper): def __init__(self, env, ratio): """ Downsample images by a factor of ratio """ gym.ObservationWrapper.__init__(self, env) (oldh, oldw, oldc) = env.observation_space.shape newshape = (oldh//ratio, oldw//ratio, oldc) self.observation_space = gym.spaces.Box(low=0, high=255, shape=newshape, dtype=np.uint8) def observation(self, frame): height, width, _ = self.observation_space.shape frame = cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA) if frame.ndim == 2: frame = frame[:,:,None] return frame class Rgb2gray(gym.ObservationWrapper): def __init__(self, env): """ Downsample images by a factor of ratio """ gym.ObservationWrapper.__init__(self, env) (oldh, oldw, _oldc) = env.observation_space.shape self.observation_space = gym.spaces.Box(low=0, high=255, shape=(oldh, oldw, 1), dtype=np.uint8) def observation(self, frame): frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) return frame[:,:,None] class MovieRecord(gym.Wrapper): def __init__(self, env, savedir, k): gym.Wrapper.__init__(self, env) self.savedir = savedir self.k = k self.epcount = 0 def reset(self): if self.epcount % self.k == 0: self.env.unwrapped.movie_path = self.savedir else: self.env.unwrapped.movie_path = None self.env.unwrapped.movie = None self.epcount += 1 return self.env.reset() class AppendTimeout(gym.Wrapper): def __init__(self, env): gym.Wrapper.__init__(self, env) self.action_space = env.action_space self.timeout_space = 
gym.spaces.Box(low=np.array([0.0]), high=np.array([1.0]), dtype=np.float32) self.original_os = env.observation_space if isinstance(self.original_os, gym.spaces.Dict): import copy ordered_dict = copy.deepcopy(self.original_os.spaces) ordered_dict['value_estimation_timeout'] = self.timeout_space self.observation_space = gym.spaces.Dict(ordered_dict) self.dict_mode = True else: self.observation_space = gym.spaces.Dict({ 'original': self.original_os, 'value_estimation_timeout': self.timeout_space }) self.dict_mode = False self.ac_count = None while 1: if not hasattr(env, "_max_episode_steps"): # Looking for TimeLimit wrapper that has this field env = env.env continue break self.timeout = env._max_episode_steps def step(self, ac): self.ac_count += 1 ob, rew, done, info = self.env.step(ac) return self._process(ob), rew, done, info def reset(self): self.ac_count = 0 return self._process(self.env.reset()) def _process(self, ob): fracmissing = 1 - self.ac_count / self.timeout if self.dict_mode: ob['value_estimation_timeout'] = fracmissing else: return { 'original': ob, 'value_estimation_timeout': fracmissing } class StartDoingRandomActionsWrapper(gym.Wrapper): """ Warning: can eat info dicts, not good if you depend on them """ def __init__(self, env, max_random_steps, on_startup=True, every_episode=False): gym.Wrapper.__init__(self, env) self.on_startup = on_startup self.every_episode = every_episode self.random_steps = max_random_steps self.last_obs = None if on_startup: self.some_random_steps() def some_random_steps(self): self.last_obs = self.env.reset() n = np.random.randint(self.random_steps) #print("running for random %i frames" % n) for _ in range(n): self.last_obs, _, done, _ = self.env.step(self.env.action_space.sample()) if done: self.last_obs = self.env.reset() def reset(self): return self.last_obs def step(self, a): self.last_obs, rew, done, info = self.env.step(a) if done: self.last_obs = self.env.reset() if self.every_episode: self.some_random_steps() return self.last_obs, rew, done, info def make_retro(*, game, state=None, max_episode_steps=4500, **kwargs): import retro if state is None: state = retro.State.DEFAULT env = retro.make(game, state, **kwargs) env = StochasticFrameSkip(env, n=4, stickprob=0.25) if max_episode_steps is not None: env = TimeLimit(env, max_episode_steps=max_episode_steps) return env def wrap_deepmind_retro(env, scale=True, frame_stack=4): """ Configure environment for retro games, using config similar to DeepMind-style Atari in wrap_deepmind """ env = WarpFrame(env) env = ClipRewardEnv(env) if frame_stack > 1: env = FrameStack(env, frame_stack) if scale: env = ScaledFloatFrame(env) return env class SonicDiscretizer(gym.ActionWrapper): """ Wrap a gym-retro environment and make it use discrete actions for the Sonic game. """ def __init__(self, env): super(SonicDiscretizer, self).__init__(env) buttons = ["B", "A", "MODE", "START", "UP", "DOWN", "LEFT", "RIGHT", "C", "Y", "X", "Z"] actions = [['LEFT'], ['RIGHT'], ['LEFT', 'DOWN'], ['RIGHT', 'DOWN'], ['DOWN'], ['DOWN', 'B'], ['B']] self._actions = [] for action in actions: arr = np.array([False] * 12) for button in action: arr[buttons.index(button)] = True self._actions.append(arr) self.action_space = gym.spaces.Discrete(len(self._actions)) def action(self, a): # pylint: disable=W0221 return self._actions[a].copy() class RewardScaler(gym.RewardWrapper): """ Bring rewards to a reasonable scale for PPO. This is incredibly important and effects performance drastically. 
""" def __init__(self, env, scale=0.01): super(RewardScaler, self).__init__(env) self.scale = scale def reward(self, reward): return reward * self.scale class AllowBacktracking(gym.Wrapper): """ Use deltas in max(X) as the reward, rather than deltas in X. This way, agents are not discouraged too heavily from exploring backwards if there is no way to advance head-on in the level. """ def __init__(self, env): super(AllowBacktracking, self).__init__(env) self._cur_x = 0 self._max_x = 0 def reset(self, **kwargs): # pylint: disable=E0202 self._cur_x = 0 self._max_x = 0 return self.env.reset(**kwargs) def step(self, action): # pylint: disable=E0202 obs, rew, done, info = self.env.step(action) self._cur_x += rew rew = max(0, self._cur_x - self._max_x) self._max_x = max(self._max_x, self._cur_x) return obs, rew, done, info
9,752
33.708185
107
py
baselines
baselines-master/baselines/common/wrappers.py
import gym


class TimeLimit(gym.Wrapper):
    def __init__(self, env, max_episode_steps=None):
        super(TimeLimit, self).__init__(env)
        self._max_episode_steps = max_episode_steps
        self._elapsed_steps = 0

    def step(self, ac):
        observation, reward, done, info = self.env.step(ac)
        self._elapsed_steps += 1
        if self._elapsed_steps >= self._max_episode_steps:
            done = True
            info['TimeLimit.truncated'] = True
        return observation, reward, done, info

    def reset(self, **kwargs):
        self._elapsed_steps = 0
        return self.env.reset(**kwargs)


class ClipActionsWrapper(gym.Wrapper):
    def step(self, action):
        import numpy as np
        action = np.nan_to_num(action)
        action = np.clip(action, self.action_space.low, self.action_space.high)
        return self.env.step(action)

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)
946
30.566667
79
py
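A minimal sketch of the TimeLimit wrapper above; CartPole is just an example environment, and the classic gym step API used elsewhere in this repo is assumed.

import gym
from baselines.common.wrappers import TimeLimit

env = TimeLimit(gym.make("CartPole-v0"), max_episode_steps=50)
obs, done, steps = env.reset(), False, 0
while not done:
    obs, rew, done, info = env.step(env.action_space.sample())
    steps += 1
# the episode ends either naturally or after at most 50 steps; in the
# truncated case the wrapper sets info['TimeLimit.truncated'] = True
print(steps, info.get('TimeLimit.truncated', False))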
baselines
baselines-master/baselines/common/segment_tree.py
import operator class SegmentTree(object): def __init__(self, capacity, operation, neutral_element): """Build a Segment Tree data structure. https://en.wikipedia.org/wiki/Segment_tree Can be used as regular array, but with two important differences: a) setting item's value is slightly slower. It is O(lg capacity) instead of O(1). b) user has access to an efficient ( O(log segment size) ) `reduce` operation which reduces `operation` over a contiguous subsequence of items in the array. Paramters --------- capacity: int Total size of the array - must be a power of two. operation: lambda obj, obj -> obj and operation for combining elements (eg. sum, max) must form a mathematical group together with the set of possible values for array elements (i.e. be associative) neutral_element: obj neutral element for the operation above. eg. float('-inf') for max and 0 for sum. """ assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2." self._capacity = capacity self._value = [neutral_element for _ in range(2 * capacity)] self._operation = operation def _reduce_helper(self, start, end, node, node_start, node_end): if start == node_start and end == node_end: return self._value[node] mid = (node_start + node_end) // 2 if end <= mid: return self._reduce_helper(start, end, 2 * node, node_start, mid) else: if mid + 1 <= start: return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end) else: return self._operation( self._reduce_helper(start, mid, 2 * node, node_start, mid), self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end) ) def reduce(self, start=0, end=None): """Returns result of applying `self.operation` to a contiguous subsequence of the array. self.operation(arr[start], operation(arr[start+1], operation(... arr[end]))) Parameters ---------- start: int beginning of the subsequence end: int end of the subsequences Returns ------- reduced: obj result of reducing self.operation over the specified range of array elements. """ if end is None: end = self._capacity if end < 0: end += self._capacity end -= 1 return self._reduce_helper(start, end, 1, 0, self._capacity - 1) def __setitem__(self, idx, val): # index of the leaf idx += self._capacity self._value[idx] = val idx //= 2 while idx >= 1: self._value[idx] = self._operation( self._value[2 * idx], self._value[2 * idx + 1] ) idx //= 2 def __getitem__(self, idx): assert 0 <= idx < self._capacity return self._value[self._capacity + idx] class SumSegmentTree(SegmentTree): def __init__(self, capacity): super(SumSegmentTree, self).__init__( capacity=capacity, operation=operator.add, neutral_element=0.0 ) def sum(self, start=0, end=None): """Returns arr[start] + ... + arr[end]""" return super(SumSegmentTree, self).reduce(start, end) def find_prefixsum_idx(self, prefixsum): """Find the highest index `i` in the array such that sum(arr[0] + arr[1] + ... + arr[i - i]) <= prefixsum if array values are probabilities, this function allows to sample indexes according to the discrete probability efficiently. 
        Parameters
        ----------
        prefixsum: float
            upper bound on the sum of array prefix

        Returns
        -------
        idx: int
            highest index satisfying the prefixsum constraint
        """
        assert 0 <= prefixsum <= self.sum() + 1e-5
        idx = 1
        while idx < self._capacity:  # while non-leaf
            if self._value[2 * idx] > prefixsum:
                idx = 2 * idx
            else:
                prefixsum -= self._value[2 * idx]
                idx = 2 * idx + 1
        return idx - self._capacity


class MinSegmentTree(SegmentTree):
    def __init__(self, capacity):
        super(MinSegmentTree, self).__init__(
            capacity=capacity,
            operation=min,
            neutral_element=float('inf')
        )

    def min(self, start=0, end=None):
        """Returns min(arr[start], ..., arr[end])"""

        return super(MinSegmentTree, self).reduce(start, end)
4,899
32.561644
109
py
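A small usage sketch for the segment trees above; capacity must be a power of two and the stored values are illustrative.

from baselines.common.segment_tree import SumSegmentTree, MinSegmentTree

tree = SumSegmentTree(capacity=4)
for i, p in enumerate([0.1, 0.5, 0.2, 0.2]):
    tree[i] = p

print(tree.sum())                      # 1.0
print(tree.sum(0, 2))                  # 0.6 (= arr[0] + arr[1])
print(tree.find_prefixsum_idx(0.55))   # 1: proportional sampling lands in arr[1]

mtree = MinSegmentTree(capacity=4)
for i, p in enumerate([0.1, 0.5, 0.2, 0.2]):
    mtree[i] = p
print(mtree.min())                     # 0.1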
baselines
baselines-master/baselines/common/policies.py
import tensorflow as tf from baselines.common import tf_util from baselines.a2c.utils import fc from baselines.common.distributions import make_pdtype from baselines.common.input import observation_placeholder, encode_observation from baselines.common.tf_util import adjust_shape from baselines.common.mpi_running_mean_std import RunningMeanStd from baselines.common.models import get_network_builder import gym class PolicyWithValue(object): """ Encapsulates fields and methods for RL policy and value function estimation with shared parameters """ def __init__(self, env, observations, latent, estimate_q=False, vf_latent=None, sess=None, **tensors): """ Parameters: ---------- env RL environment observations tensorflow placeholder in which the observations will be fed latent latent state from which policy distribution parameters should be inferred vf_latent latent state from which value function should be inferred (if None, then latent is used) sess tensorflow session to run calculations in (if None, default session is used) **tensors tensorflow tensors for additional attributes such as state or mask """ self.X = observations self.state = tf.constant([]) self.initial_state = None self.__dict__.update(tensors) vf_latent = vf_latent if vf_latent is not None else latent vf_latent = tf.layers.flatten(vf_latent) latent = tf.layers.flatten(latent) # Based on the action space, will select what probability distribution type self.pdtype = make_pdtype(env.action_space) self.pd, self.pi = self.pdtype.pdfromlatent(latent, init_scale=0.01) # Take an action self.action = self.pd.sample() # Calculate the neg log of our probability self.neglogp = self.pd.neglogp(self.action) self.sess = sess or tf.get_default_session() if estimate_q: assert isinstance(env.action_space, gym.spaces.Discrete) self.q = fc(vf_latent, 'q', env.action_space.n) self.vf = self.q else: self.vf = fc(vf_latent, 'vf', 1) self.vf = self.vf[:,0] def _evaluate(self, variables, observation, **extra_feed): sess = self.sess feed_dict = {self.X: adjust_shape(self.X, observation)} for inpt_name, data in extra_feed.items(): if inpt_name in self.__dict__.keys(): inpt = self.__dict__[inpt_name] if isinstance(inpt, tf.Tensor) and inpt._op.type == 'Placeholder': feed_dict[inpt] = adjust_shape(inpt, data) return sess.run(variables, feed_dict) def step(self, observation, **extra_feed): """ Compute next action(s) given the observation(s) Parameters: ---------- observation observation data (either single or a batch) **extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__) Returns: ------- (action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple """ a, v, state, neglogp = self._evaluate([self.action, self.vf, self.state, self.neglogp], observation, **extra_feed) if state.size == 0: state = None return a, v, state, neglogp def value(self, ob, *args, **kwargs): """ Compute value estimate(s) given the observation(s) Parameters: ---------- observation observation data (either single or a batch) **extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__) Returns: ------- value estimate """ return self._evaluate(self.vf, ob, *args, **kwargs) def save(self, save_path): tf_util.save_state(save_path, sess=self.sess) def load(self, load_path): tf_util.load_state(load_path, sess=self.sess) def build_policy(env, policy_network, value_network=None, normalize_observations=False, 
estimate_q=False, **policy_kwargs): if isinstance(policy_network, str): network_type = policy_network policy_network = get_network_builder(network_type)(**policy_kwargs) def policy_fn(nbatch=None, nsteps=None, sess=None, observ_placeholder=None): ob_space = env.observation_space X = observ_placeholder if observ_placeholder is not None else observation_placeholder(ob_space, batch_size=nbatch) extra_tensors = {} if normalize_observations and X.dtype == tf.float32: encoded_x, rms = _normalize_clip_observation(X) extra_tensors['rms'] = rms else: encoded_x = X encoded_x = encode_observation(ob_space, encoded_x) with tf.variable_scope('pi', reuse=tf.AUTO_REUSE): policy_latent = policy_network(encoded_x) if isinstance(policy_latent, tuple): policy_latent, recurrent_tensors = policy_latent if recurrent_tensors is not None: # recurrent architecture, need a few more steps nenv = nbatch // nsteps assert nenv > 0, 'Bad input for recurrent policy: batch size {} smaller than nsteps {}'.format(nbatch, nsteps) policy_latent, recurrent_tensors = policy_network(encoded_x, nenv) extra_tensors.update(recurrent_tensors) _v_net = value_network if _v_net is None or _v_net == 'shared': vf_latent = policy_latent else: if _v_net == 'copy': _v_net = policy_network else: assert callable(_v_net) with tf.variable_scope('vf', reuse=tf.AUTO_REUSE): # TODO recurrent architectures are not supported with value_network=copy yet vf_latent = _v_net(encoded_x) policy = PolicyWithValue( env=env, observations=X, latent=policy_latent, vf_latent=vf_latent, sess=sess, estimate_q=estimate_q, **extra_tensors ) return policy return policy_fn def _normalize_clip_observation(x, clip_range=[-5.0, 5.0]): rms = RunningMeanStd(shape=x.shape[1:]) norm_x = tf.clip_by_value((x - rms.mean) / rms.std, min(clip_range), max(clip_range)) return norm_x, rms
6,652
34.57754
137
py
baselines
baselines-master/baselines/common/models.py
import numpy as np import tensorflow as tf from baselines.a2c import utils from baselines.a2c.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch from baselines.common.mpi_running_mean_std import RunningMeanStd mapping = {} def register(name): def _thunk(func): mapping[name] = func return func return _thunk def nature_cnn(unscaled_images, **conv_kwargs): """ CNN from Nature paper. """ scaled_images = tf.cast(unscaled_images, tf.float32) / 255. activ = tf.nn.relu h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2), **conv_kwargs)) h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs)) h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs)) h3 = conv_to_fc(h3) return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))) def build_impala_cnn(unscaled_images, depths=[16,32,32], **conv_kwargs): """ Model used in the paper "IMPALA: Scalable Distributed Deep-RL with Importance Weighted Actor-Learner Architectures" https://arxiv.org/abs/1802.01561 """ layer_num = 0 def get_layer_num_str(): nonlocal layer_num num_str = str(layer_num) layer_num += 1 return num_str def conv_layer(out, depth): return tf.layers.conv2d(out, depth, 3, padding='same', name='layer_' + get_layer_num_str()) def residual_block(inputs): depth = inputs.get_shape()[-1].value out = tf.nn.relu(inputs) out = conv_layer(out, depth) out = tf.nn.relu(out) out = conv_layer(out, depth) return out + inputs def conv_sequence(inputs, depth): out = conv_layer(inputs, depth) out = tf.layers.max_pooling2d(out, pool_size=3, strides=2, padding='same') out = residual_block(out) out = residual_block(out) return out out = tf.cast(unscaled_images, tf.float32) / 255. for depth in depths: out = conv_sequence(out, depth) out = tf.layers.flatten(out) out = tf.nn.relu(out) out = tf.layers.dense(out, 256, activation=tf.nn.relu, name='layer_' + get_layer_num_str()) return out @register("mlp") def mlp(num_layers=2, num_hidden=64, activation=tf.tanh, layer_norm=False): """ Stack of fully-connected layers to be used in a policy / q-function approximator Parameters: ---------- num_layers: int number of fully-connected layers (default: 2) num_hidden: int size of fully-connected layers (default: 64) activation: activation function (default: tf.tanh) Returns: ------- function that builds fully connected network with a given input tensor / placeholder """ def network_fn(X): h = tf.layers.flatten(X) for i in range(num_layers): h = fc(h, 'mlp_fc{}'.format(i), nh=num_hidden, init_scale=np.sqrt(2)) if layer_norm: h = tf.contrib.layers.layer_norm(h, center=True, scale=True) h = activation(h) return h return network_fn @register("cnn") def cnn(**conv_kwargs): def network_fn(X): return nature_cnn(X, **conv_kwargs) return network_fn @register("impala_cnn") def impala_cnn(**conv_kwargs): def network_fn(X): return build_impala_cnn(X) return network_fn @register("cnn_small") def cnn_small(**conv_kwargs): def network_fn(X): h = tf.cast(X, tf.float32) / 255. activ = tf.nn.relu h = activ(conv(h, 'c1', nf=8, rf=8, stride=4, init_scale=np.sqrt(2), **conv_kwargs)) h = activ(conv(h, 'c2', nf=16, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs)) h = conv_to_fc(h) h = activ(fc(h, 'fc1', nh=128, init_scale=np.sqrt(2))) return h return network_fn @register("lstm") def lstm(nlstm=128, layer_norm=False): """ Builds LSTM (Long-Short Term Memory) network to be used in a policy. Note that the resulting function returns not only the output of the LSTM (i.e. 
hidden state of lstm for each step in the sequence), but also a dictionary with auxiliary tensors to be set as policy attributes. Specifically, S is a placeholder to feed current state (LSTM state has to be managed outside policy) M is a placeholder for the mask (used to mask out observations after the end of the episode, but can be used for other purposes too) initial_state is a numpy array containing initial lstm state (usually zeros) state is the output LSTM state (to be fed into S at the next call) An example of usage of lstm-based policy can be found here: common/tests/test_doc_examples.py/test_lstm_example Parameters: ---------- nlstm: int LSTM hidden state size layer_norm: bool if True, layer-normalized version of LSTM is used Returns: ------- function that builds LSTM with a given input tensor / placeholder """ def network_fn(X, nenv=1): nbatch = X.shape[0] nsteps = nbatch // nenv h = tf.layers.flatten(X) M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1) S = tf.placeholder(tf.float32, [nenv, 2*nlstm]) #states xs = batch_to_seq(h, nenv, nsteps) ms = batch_to_seq(M, nenv, nsteps) if layer_norm: h5, snew = utils.lnlstm(xs, ms, S, scope='lnlstm', nh=nlstm) else: h5, snew = utils.lstm(xs, ms, S, scope='lstm', nh=nlstm) h = seq_to_batch(h5) initial_state = np.zeros(S.shape.as_list(), dtype=float) return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state} return network_fn @register("cnn_lstm") def cnn_lstm(nlstm=128, layer_norm=False, conv_fn=nature_cnn, **conv_kwargs): def network_fn(X, nenv=1): nbatch = X.shape[0] nsteps = nbatch // nenv h = conv_fn(X, **conv_kwargs) M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1) S = tf.placeholder(tf.float32, [nenv, 2*nlstm]) #states xs = batch_to_seq(h, nenv, nsteps) ms = batch_to_seq(M, nenv, nsteps) if layer_norm: h5, snew = utils.lnlstm(xs, ms, S, scope='lnlstm', nh=nlstm) else: h5, snew = utils.lstm(xs, ms, S, scope='lstm', nh=nlstm) h = seq_to_batch(h5) initial_state = np.zeros(S.shape.as_list(), dtype=float) return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state} return network_fn @register("impala_cnn_lstm") def impala_cnn_lstm(): return cnn_lstm(nlstm=256, conv_fn=build_impala_cnn) @register("cnn_lnlstm") def cnn_lnlstm(nlstm=128, **conv_kwargs): return cnn_lstm(nlstm, layer_norm=True, **conv_kwargs) @register("conv_only") def conv_only(convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], **conv_kwargs): ''' convolutions-only net Parameters: ---------- conv: list of triples (filter_number, filter_size, stride) specifying parameters for each layer. Returns: function that takes tensorflow tensor as input and returns the output of the last convolutional layer ''' def network_fn(X): out = tf.cast(X, tf.float32) / 255. with tf.variable_scope("convnet"): for num_outputs, kernel_size, stride in convs: out = tf.contrib.layers.convolution2d(out, num_outputs=num_outputs, kernel_size=kernel_size, stride=stride, activation_fn=tf.nn.relu, **conv_kwargs) return out return network_fn def _normalize_clip_observation(x, clip_range=[-5.0, 5.0]): rms = RunningMeanStd(shape=x.shape[1:]) norm_x = tf.clip_by_value((x - rms.mean) / rms.std, min(clip_range), max(clip_range)) return norm_x, rms def get_network_builder(name): """ If you want to register your own network outside models.py, you just need: Usage Example: ------------- from baselines.common.models import register @register("your_network_name") def your_network_define(**net_kwargs): ... 
return network_fn """ if callable(name): return name elif name in mapping: return mapping[name] else: raise ValueError('Unknown network type: {}'.format(name))
8,557
30.007246
140
py
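A hedged sketch of registering a custom network, following the pattern shown in the get_network_builder docstring above; the name my_tiny_mlp and the layer size are made up, and TF1-style tf.layers is assumed, consistent with the rest of this file.

import tensorflow as tf
from baselines.common.models import register, get_network_builder

@register("my_tiny_mlp")
def my_tiny_mlp(hidden=32):
    def network_fn(X):
        h = tf.layers.flatten(X)
        h = tf.layers.dense(h, hidden, activation=tf.nn.relu)
        return h
    return network_fn

# later, e.g. build_policy(env, "my_tiny_mlp", hidden=64) resolves it via:
network_fn = get_network_builder("my_tiny_mlp")(hidden=64)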
baselines
baselines-master/baselines/common/mpi_adam_optimizer.py
import numpy as np import tensorflow as tf from baselines.common import tf_util as U from baselines.common.tests.test_with_mpi import with_mpi from baselines import logger try: from mpi4py import MPI except ImportError: MPI = None class MpiAdamOptimizer(tf.train.AdamOptimizer): """Adam optimizer that averages gradients across mpi processes.""" def __init__(self, comm, grad_clip=None, mpi_rank_weight=1, **kwargs): self.comm = comm self.grad_clip = grad_clip self.mpi_rank_weight = mpi_rank_weight tf.train.AdamOptimizer.__init__(self, **kwargs) def compute_gradients(self, loss, var_list, **kwargs): grads_and_vars = tf.train.AdamOptimizer.compute_gradients(self, loss, var_list, **kwargs) grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None] flat_grad = tf.concat([tf.reshape(g, (-1,)) for g, v in grads_and_vars], axis=0) * self.mpi_rank_weight shapes = [v.shape.as_list() for g, v in grads_and_vars] sizes = [int(np.prod(s)) for s in shapes] total_weight = np.zeros(1, np.float32) self.comm.Allreduce(np.array([self.mpi_rank_weight], dtype=np.float32), total_weight, op=MPI.SUM) total_weight = total_weight[0] buf = np.zeros(sum(sizes), np.float32) countholder = [0] # Counts how many times _collect_grads has been called stat = tf.reduce_sum(grads_and_vars[0][1]) # sum of first variable def _collect_grads(flat_grad, np_stat): if self.grad_clip is not None: gradnorm = np.linalg.norm(flat_grad) if gradnorm > 1: flat_grad /= gradnorm logger.logkv_mean('gradnorm', gradnorm) logger.logkv_mean('gradclipfrac', float(gradnorm > 1)) self.comm.Allreduce(flat_grad, buf, op=MPI.SUM) np.divide(buf, float(total_weight), out=buf) if countholder[0] % 100 == 0: check_synced(np_stat, self.comm) countholder[0] += 1 return buf avg_flat_grad = tf.py_func(_collect_grads, [flat_grad, stat], tf.float32) avg_flat_grad.set_shape(flat_grad.shape) avg_grads = tf.split(avg_flat_grad, sizes, axis=0) avg_grads_and_vars = [(tf.reshape(g, v.shape), v) for g, (_, v) in zip(avg_grads, grads_and_vars)] return avg_grads_and_vars def check_synced(localval, comm=None): """ It's common to forget to initialize your variables to the same values, or (less commonly) if you update them in some other way than adam, to get them out of sync. This function checks that variables on all MPI workers are the same, and raises an AssertionError otherwise Arguments: comm: MPI communicator localval: list of local variables (list of variables on current worker to be compared with the other workers) """ comm = comm or MPI.COMM_WORLD vals = comm.gather(localval) if comm.rank == 0: assert all(val==vals[0] for val in vals[1:]),\ 'MpiAdamOptimizer detected that different workers have different weights: {}'.format(vals) @with_mpi(timeout=5) def test_nonfreeze(): np.random.seed(0) tf.set_random_seed(0) a = tf.Variable(np.random.randn(3).astype('float32')) b = tf.Variable(np.random.randn(2,5).astype('float32')) loss = tf.reduce_sum(tf.square(a)) + tf.reduce_sum(tf.sin(b)) stepsize = 1e-2 # for some reason the session config with inter_op_parallelism_threads was causing # nested sess.run calls to freeze config = tf.ConfigProto(inter_op_parallelism_threads=1) sess = U.get_session(config=config) update_op = MpiAdamOptimizer(comm=MPI.COMM_WORLD, learning_rate=stepsize).minimize(loss) sess.run(tf.global_variables_initializer()) losslist_ref = [] for i in range(100): l,_ = sess.run([loss, update_op]) print(i, l) losslist_ref.append(l)
3,976
42.703297
117
py
baselines
baselines-master/baselines/common/__init__.py
# flake8: noqa F403
from baselines.common.console_util import *
from baselines.common.dataset import Dataset
from baselines.common.math_util import *
from baselines.common.misc_util import *
191
31
44
py
baselines
baselines-master/baselines/common/mpi_moments.py
from mpi4py import MPI
import numpy as np
from baselines.common import zipsame


def mpi_mean(x, axis=0, comm=None, keepdims=False):
    x = np.asarray(x)
    assert x.ndim > 0
    if comm is None:
        comm = MPI.COMM_WORLD
    xsum = x.sum(axis=axis, keepdims=keepdims)
    n = xsum.size
    localsum = np.zeros(n+1, x.dtype)
    localsum[:n] = xsum.ravel()
    localsum[n] = x.shape[axis]
    # globalsum = np.zeros_like(localsum)
    # comm.Allreduce(localsum, globalsum, op=MPI.SUM)
    globalsum = comm.allreduce(localsum, op=MPI.SUM)
    return globalsum[:n].reshape(xsum.shape) / globalsum[n], globalsum[n]


def mpi_moments(x, axis=0, comm=None, keepdims=False):
    x = np.asarray(x)
    assert x.ndim > 0
    mean, count = mpi_mean(x, axis=axis, comm=comm, keepdims=True)
    sqdiffs = np.square(x - mean)
    meansqdiff, count1 = mpi_mean(sqdiffs, axis=axis, comm=comm, keepdims=True)
    assert count1 == count
    std = np.sqrt(meansqdiff)
    if not keepdims:
        newshape = mean.shape[:axis] + mean.shape[axis+1:]
        mean = mean.reshape(newshape)
        std = std.reshape(newshape)
    return mean, std, count


def test_runningmeanstd():
    import subprocess
    subprocess.check_call(['mpirun', '-np', '3', 'python', '-c',
        'from baselines.common.mpi_moments import _helper_runningmeanstd; _helper_runningmeanstd()'])


def _helper_runningmeanstd():
    comm = MPI.COMM_WORLD
    np.random.seed(0)
    for (triple, axis) in [
        ((np.random.randn(3), np.random.randn(4), np.random.randn(5)), 0),
        ((np.random.randn(3, 2), np.random.randn(4, 2), np.random.randn(5, 2)), 0),
        ((np.random.randn(2, 3), np.random.randn(2, 4), np.random.randn(2, 4)), 1),
    ]:
        x = np.concatenate(triple, axis=axis)
        ms1 = [x.mean(axis=axis), x.std(axis=axis), x.shape[axis]]
        ms2 = mpi_moments(triple[comm.Get_rank()], axis=axis)
        for (a1, a2) in zipsame(ms1, ms2):
            print(a1, a2)
            assert np.allclose(a1, a2)
            print("ok!")
2,018
31.564516
101
py
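A single-process sanity check for mpi_moments (assuming mpi4py is installed); with one rank the allreduce is a no-op, so the result should match plain NumPy moments.

import numpy as np
from baselines.common.mpi_moments import mpi_moments

x = np.random.randn(64, 5)
mean, std, count = mpi_moments(x, axis=0)
np.testing.assert_allclose(mean, x.mean(axis=0), atol=1e-8)
np.testing.assert_allclose(std, x.std(axis=0), atol=1e-8)
assert count == 64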
baselines
baselines-master/baselines/common/console_util.py
from __future__ import print_function
from contextlib import contextmanager
import numpy as np
import time
import shlex
import subprocess

# ================================================================
# Misc
# ================================================================

def fmt_row(width, row, header=False):
    out = " | ".join(fmt_item(x, width) for x in row)
    if header:
        out = out + "\n" + "-"*len(out)
    return out

def fmt_item(x, l):
    if isinstance(x, np.ndarray):
        assert x.ndim == 0
        x = x.item()
    if isinstance(x, (float, np.float32, np.float64)):
        v = abs(x)
        if (v < 1e-4 or v > 1e+4) and v > 0:
            rep = "%7.2e" % x
        else:
            rep = "%7.5f" % x
    else:
        rep = str(x)
    return " "*(l - len(rep)) + rep

color2num = dict(
    gray=30,
    red=31,
    green=32,
    yellow=33,
    blue=34,
    magenta=35,
    cyan=36,
    white=37,
    crimson=38
)

def colorize(string, color='green', bold=False, highlight=False):
    attr = []
    num = color2num[color]
    if highlight:
        num += 10
    attr.append(str(num))
    if bold:
        attr.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)

def print_cmd(cmd, dry=False):
    if isinstance(cmd, str):  # for shell=True
        pass
    else:
        cmd = ' '.join(shlex.quote(arg) for arg in cmd)
    print(colorize(('CMD: ' if not dry else 'DRY: ') + cmd))


def get_git_commit(cwd=None):
    return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'], cwd=cwd).decode('utf8')

def get_git_commit_message(cwd=None):
    return subprocess.check_output(['git', 'show', '-s', '--format=%B', 'HEAD'], cwd=cwd).decode('utf8')

def ccap(cmd, dry=False, env=None, **kwargs):
    print_cmd(cmd, dry)
    if not dry:
        subprocess.check_call(cmd, env=env, **kwargs)

MESSAGE_DEPTH = 0

@contextmanager
def timed(msg):
    global MESSAGE_DEPTH #pylint: disable=W0603
    print(colorize('\t'*MESSAGE_DEPTH + '=: ' + msg, color='magenta'))
    tstart = time.time()
    MESSAGE_DEPTH += 1
    yield
    MESSAGE_DEPTH -= 1
    print(colorize('\t'*MESSAGE_DEPTH + "done in %.3f seconds"%(time.time() - tstart), color='magenta'))
2,179
25.91358
104
py
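A small sketch, not in the original file, showing how the fmt_row, colorize and timed() helpers above can be combined; the column values are invented:

# illustrative console formatting with baselines.common.console_util
import time
from baselines.common.console_util import fmt_row, colorize, timed

print(fmt_row(10, ['epoch', 'loss', 'reward'], header=True))   # padded header row with a dashed underline
print(fmt_row(10, [1, 0.253, 17.0]))                           # floats rendered via %7.5f (or %7.2e for extreme values)
print(colorize('training finished', color='green', bold=True))

with timed('expensive step'):                                  # prints the elapsed time in magenta
    time.sleep(0.1)                                            # stands in for real work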
baselines
baselines-master/baselines/common/cmd_util.py
""" Helpers for scripts like run_atari.py. """ import os try: from mpi4py import MPI except ImportError: MPI = None import gym from gym.wrappers import FlattenObservation, FilterObservation from baselines import logger from baselines.bench import Monitor from baselines.common import set_global_seeds from baselines.common.atari_wrappers import make_atari, wrap_deepmind from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv from baselines.common.vec_env.dummy_vec_env import DummyVecEnv from baselines.common import retro_wrappers from baselines.common.wrappers import ClipActionsWrapper def make_vec_env(env_id, env_type, num_env, seed, wrapper_kwargs=None, env_kwargs=None, start_index=0, reward_scale=1.0, flatten_dict_observations=True, gamestate=None, initializer=None, force_dummy=False): """ Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo. """ wrapper_kwargs = wrapper_kwargs or {} env_kwargs = env_kwargs or {} mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0 seed = seed + 10000 * mpi_rank if seed is not None else None logger_dir = logger.get_dir() def make_thunk(rank, initializer=None): return lambda: make_env( env_id=env_id, env_type=env_type, mpi_rank=mpi_rank, subrank=rank, seed=seed, reward_scale=reward_scale, gamestate=gamestate, flatten_dict_observations=flatten_dict_observations, wrapper_kwargs=wrapper_kwargs, env_kwargs=env_kwargs, logger_dir=logger_dir, initializer=initializer ) set_global_seeds(seed) if not force_dummy and num_env > 1: return SubprocVecEnv([make_thunk(i + start_index, initializer=initializer) for i in range(num_env)]) else: return DummyVecEnv([make_thunk(i + start_index, initializer=None) for i in range(num_env)]) def make_env(env_id, env_type, mpi_rank=0, subrank=0, seed=None, reward_scale=1.0, gamestate=None, flatten_dict_observations=True, wrapper_kwargs=None, env_kwargs=None, logger_dir=None, initializer=None): if initializer is not None: initializer(mpi_rank=mpi_rank, subrank=subrank) wrapper_kwargs = wrapper_kwargs or {} env_kwargs = env_kwargs or {} if ':' in env_id: import re import importlib module_name = re.sub(':.*','',env_id) env_id = re.sub('.*:', '', env_id) importlib.import_module(module_name) if env_type == 'atari': env = make_atari(env_id) elif env_type == 'retro': import retro gamestate = gamestate or retro.State.DEFAULT env = retro_wrappers.make_retro(game=env_id, max_episode_steps=10000, use_restricted_actions=retro.Actions.DISCRETE, state=gamestate) else: env = gym.make(env_id, **env_kwargs) if flatten_dict_observations and isinstance(env.observation_space, gym.spaces.Dict): env = FlattenObservation(env) env.seed(seed + subrank if seed is not None else None) env = Monitor(env, logger_dir and os.path.join(logger_dir, str(mpi_rank) + '.' + str(subrank)), allow_early_resets=True) if env_type == 'atari': env = wrap_deepmind(env, **wrapper_kwargs) elif env_type == 'retro': if 'frame_stack' not in wrapper_kwargs: wrapper_kwargs['frame_stack'] = 1 env = retro_wrappers.wrap_deepmind_retro(env, **wrapper_kwargs) if isinstance(env.action_space, gym.spaces.Box): env = ClipActionsWrapper(env) if reward_scale != 1: env = retro_wrappers.RewardScaler(env, reward_scale) return env def make_mujoco_env(env_id, seed, reward_scale=1.0): """ Create a wrapped, monitored gym.Env for MuJoCo. 
""" rank = MPI.COMM_WORLD.Get_rank() myseed = seed + 1000 * rank if seed is not None else None set_global_seeds(myseed) env = gym.make(env_id) logger_path = None if logger.get_dir() is None else os.path.join(logger.get_dir(), str(rank)) env = Monitor(env, logger_path, allow_early_resets=True) env.seed(seed) if reward_scale != 1.0: from baselines.common.retro_wrappers import RewardScaler env = RewardScaler(env, reward_scale) return env def make_robotics_env(env_id, seed, rank=0): """ Create a wrapped, monitored gym.Env for MuJoCo. """ set_global_seeds(seed) env = gym.make(env_id) env = FlattenObservation(FilterObservation(env, ['observation', 'desired_goal'])) env = Monitor( env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)), info_keywords=('is_success',)) env.seed(seed) return env def arg_parser(): """ Create an empty argparse.ArgumentParser. """ import argparse return argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) def atari_arg_parser(): """ Create an argparse.ArgumentParser for run_atari.py. """ print('Obsolete - use common_arg_parser instead') return common_arg_parser() def mujoco_arg_parser(): print('Obsolete - use common_arg_parser instead') return common_arg_parser() def common_arg_parser(): """ Create an argparse.ArgumentParser for run_mujoco.py. """ parser = arg_parser() parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2') parser.add_argument('--env_type', help='type of environment, used when the environment type cannot be automatically determined', type=str) parser.add_argument('--seed', help='RNG seed', type=int, default=None) parser.add_argument('--alg', help='Algorithm', type=str, default='ppo2') parser.add_argument('--num_timesteps', type=float, default=1e6), parser.add_argument('--network', help='network type (mlp, cnn, lstm, cnn_lstm, conv_only)', default=None) parser.add_argument('--gamestate', help='game state to load (so far only used in retro games)', default=None) parser.add_argument('--num_env', help='Number of environment copies being run in parallel. When not specified, set to number of cpus for Atari, and to 1 for Mujoco', default=None, type=int) parser.add_argument('--reward_scale', help='Reward scale factor. Default: 1.0', default=1.0, type=float) parser.add_argument('--save_path', help='Path to save trained model to', default=None, type=str) parser.add_argument('--save_video_interval', help='Save video every x steps (0 = disabled)', default=0, type=int) parser.add_argument('--save_video_length', help='Length of recorded video. Default: 200', default=200, type=int) parser.add_argument('--log_path', help='Directory to save learning curve data.', default=None, type=str) parser.add_argument('--play', default=False, action='store_true') return parser def robotics_arg_parser(): """ Create an argparse.ArgumentParser for run_mujoco.py. """ parser = arg_parser() parser.add_argument('--env', help='environment ID', type=str, default='FetchReach-v0') parser.add_argument('--seed', help='RNG seed', type=int, default=None) parser.add_argument('--num-timesteps', type=int, default=int(1e6)) return parser def parse_unknown_args(args): """ Parse arguments not consumed by arg parser into a dictionary """ retval = {} preceded_by_key = False for arg in args: if arg.startswith('--'): if '=' in arg: key = arg.split('=')[0][2:] value = arg.split('=')[1] retval[key] = value else: key = arg[2:] preceded_by_key = True elif preceded_by_key: retval[key] = arg preceded_by_key = False return retval
7,922
37.275362
204
py
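As a usage sketch for make_vec_env above (not from the repo): the env id, env_type and num_env are illustrative, and any env_type other than 'atari'/'retro' simply falls through to gym.make:

# build a monitored, vectorized CartPole with 4 parallel copies
from baselines.common.cmd_util import make_vec_env

venv = make_vec_env('CartPole-v0', env_type='classic_control', num_env=4, seed=0)
obs = venv.reset()                                    # shape (4,) + observation_space.shape
actions = [venv.action_space.sample() for _ in range(4)]
obs, rews, dones, infos = venv.step(actions)
venv.close()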
baselines
baselines-master/baselines/common/input.py
import numpy as np import tensorflow as tf from gym.spaces import Discrete, Box, MultiDiscrete def observation_placeholder(ob_space, batch_size=None, name='Ob'): ''' Create placeholder to feed observations into of the size appropriate to the observation space Parameters: ---------- ob_space: gym.Space observation space batch_size: int size of the batch to be fed into input. Can be left None in most cases. name: str name of the placeholder Returns: ------- tensorflow placeholder tensor ''' assert isinstance(ob_space, Discrete) or isinstance(ob_space, Box) or isinstance(ob_space, MultiDiscrete), \ 'Can only deal with Discrete and Box observation spaces for now' dtype = ob_space.dtype if dtype == np.int8: dtype = np.uint8 return tf.placeholder(shape=(batch_size,) + ob_space.shape, dtype=dtype, name=name) def observation_input(ob_space, batch_size=None, name='Ob'): ''' Create placeholder to feed observations into of the size appropriate to the observation space, and add input encoder of the appropriate type. ''' placeholder = observation_placeholder(ob_space, batch_size, name) return placeholder, encode_observation(ob_space, placeholder) def encode_observation(ob_space, placeholder): ''' Encode input in the way that is appropriate to the observation space Parameters: ---------- ob_space: gym.Space observation space placeholder: tf.placeholder observation input placeholder ''' if isinstance(ob_space, Discrete): return tf.to_float(tf.one_hot(placeholder, ob_space.n)) elif isinstance(ob_space, Box): return tf.to_float(placeholder) elif isinstance(ob_space, MultiDiscrete): placeholder = tf.cast(placeholder, tf.int32) one_hots = [tf.to_float(tf.one_hot(placeholder[..., i], ob_space.nvec[i])) for i in range(placeholder.shape[-1])] return tf.concat(one_hots, axis=-1) else: raise NotImplementedError
2,071
30.876923
121
py
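A sketch of the placeholder/encoder pair defined above for a Discrete observation space; TF1-style graph code to match the file, with invented example indices:

# Discrete observations become one-hot float tensors via encode_observation
import numpy as np
import tensorflow as tf
from gym.spaces import Discrete
from baselines.common.input import observation_placeholder, encode_observation

ob_space = Discrete(5)
ph = observation_placeholder(ob_space)        # integer placeholder of shape (None,)
encoded = encode_observation(ob_space, ph)    # float one-hot tensor of shape (None, 5)

with tf.Session() as sess:
    print(sess.run(encoded, feed_dict={ph: np.array([0, 3])}))
    # [[1. 0. 0. 0. 0.]
    #  [0. 0. 0. 1. 0.]]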
baselines
baselines-master/baselines/common/plot_util.py
import matplotlib.pyplot as plt import os.path as osp import json import os import numpy as np import pandas from collections import defaultdict, namedtuple from baselines.bench import monitor from baselines.logger import read_json, read_csv def smooth(y, radius, mode='two_sided', valid_only=False): ''' Smooth signal y, where radius is determines the size of the window mode='twosided': average over the window [max(index - radius, 0), min(index + radius, len(y)-1)] mode='causal': average over the window [max(index - radius, 0), index] valid_only: put nan in entries where the full-sized window is not available ''' assert mode in ('two_sided', 'causal') if len(y) < 2*radius+1: return np.ones_like(y) * y.mean() elif mode == 'two_sided': convkernel = np.ones(2 * radius+1) out = np.convolve(y, convkernel,mode='same') / np.convolve(np.ones_like(y), convkernel, mode='same') if valid_only: out[:radius] = out[-radius:] = np.nan elif mode == 'causal': convkernel = np.ones(radius) out = np.convolve(y, convkernel,mode='full') / np.convolve(np.ones_like(y), convkernel, mode='full') out = out[:-radius+1] if valid_only: out[:radius] = np.nan return out def one_sided_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8): ''' perform one-sided (causal) EMA (exponential moving average) smoothing and resampling to an even grid with n points. Does not do extrapolation, so we assume xolds[0] <= low && high <= xolds[-1] Arguments: xolds: array or list - x values of data. Needs to be sorted in ascending order yolds: array of list - y values of data. Has to have the same length as xolds low: float - min value of the new x grid. By default equals to xolds[0] high: float - max value of the new x grid. By default equals to xolds[-1] n: int - number of points in new x grid decay_steps: float - EMA decay factor, expressed in new x grid steps. low_counts_threshold: float or int - y values with counts less than this value will be set to NaN Returns: tuple sum_ys, count_ys where xs - array with new x grid ys - array of EMA of y at each point of the new x grid count_ys - array of EMA of y counts at each point of the new x grid ''' low = xolds[0] if low is None else low high = xolds[-1] if high is None else high assert xolds[0] <= low, 'low = {} < xolds[0] = {} - extrapolation not permitted!'.format(low, xolds[0]) assert xolds[-1] >= high, 'high = {} > xolds[-1] = {} - extrapolation not permitted!'.format(high, xolds[-1]) assert len(xolds) == len(yolds), 'length of xolds ({}) and yolds ({}) do not match!'.format(len(xolds), len(yolds)) xolds = xolds.astype('float64') yolds = yolds.astype('float64') luoi = 0 # last unused old index sum_y = 0. count_y = 0. xnews = np.linspace(low, high, n) decay_period = (high - low) / (n - 1) * decay_steps interstep_decay = np.exp(- 1. / decay_steps) sum_ys = np.zeros_like(xnews) count_ys = np.zeros_like(xnews) for i in range(n): xnew = xnews[i] sum_y *= interstep_decay count_y *= interstep_decay while True: if luoi >= len(xolds): break xold = xolds[luoi] if xold <= xnew: decay = np.exp(- (xnew - xold) / decay_period) sum_y += decay * yolds[luoi] count_y += decay luoi += 1 else: break sum_ys[i] = sum_y count_ys[i] = count_y ys = sum_ys / count_ys ys[count_ys < low_counts_threshold] = np.nan return xnews, ys, count_ys def symmetric_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8): ''' perform symmetric EMA (exponential moving average) smoothing and resampling to an even grid with n points. 
Does not do extrapolation, so we assume xolds[0] <= low && high <= xolds[-1] Arguments: xolds: array or list - x values of data. Needs to be sorted in ascending order yolds: array of list - y values of data. Has to have the same length as xolds low: float - min value of the new x grid. By default equals to xolds[0] high: float - max value of the new x grid. By default equals to xolds[-1] n: int - number of points in new x grid decay_steps: float - EMA decay factor, expressed in new x grid steps. low_counts_threshold: float or int - y values with counts less than this value will be set to NaN Returns: tuple sum_ys, count_ys where xs - array with new x grid ys - array of EMA of y at each point of the new x grid count_ys - array of EMA of y counts at each point of the new x grid ''' xs, ys1, count_ys1 = one_sided_ema(xolds, yolds, low, high, n, decay_steps, low_counts_threshold=0) _, ys2, count_ys2 = one_sided_ema(-xolds[::-1], yolds[::-1], -high, -low, n, decay_steps, low_counts_threshold=0) ys2 = ys2[::-1] count_ys2 = count_ys2[::-1] count_ys = count_ys1 + count_ys2 ys = (ys1 * count_ys1 + ys2 * count_ys2) / count_ys ys[count_ys < low_counts_threshold] = np.nan return xs, ys, count_ys Result = namedtuple('Result', 'monitor progress dirname metadata') Result.__new__.__defaults__ = (None,) * len(Result._fields) def load_results(root_dir_or_dirs, enable_progress=True, enable_monitor=True, verbose=False): ''' load summaries of runs from a list of directories (including subdirectories) Arguments: enable_progress: bool - if True, will attempt to load data from progress.csv files (data saved by logger). Default: True enable_monitor: bool - if True, will attempt to load data from monitor.csv files (data saved by Monitor environment wrapper). Default: True verbose: bool - if True, will print out list of directories from which the data is loaded. 
Default: False Returns: List of Result objects with the following fields: - dirname - path to the directory data was loaded from - metadata - run metadata (such as command-line arguments and anything else in metadata.json file - monitor - if enable_monitor is True, this field contains pandas dataframe with loaded monitor.csv file (or aggregate of all *.monitor.csv files in the directory) - progress - if enable_progress is True, this field contains pandas dataframe with loaded progress.csv file ''' import re if isinstance(root_dir_or_dirs, str): rootdirs = [osp.expanduser(root_dir_or_dirs)] else: rootdirs = [osp.expanduser(d) for d in root_dir_or_dirs] allresults = [] for rootdir in rootdirs: assert osp.exists(rootdir), "%s doesn't exist"%rootdir for dirname, dirs, files in os.walk(rootdir): if '-proc' in dirname: files[:] = [] continue monitor_re = re.compile(r'(\d+\.)?(\d+\.)?monitor\.csv') if set(['metadata.json', 'monitor.json', 'progress.json', 'progress.csv']).intersection(files) or \ any([f for f in files if monitor_re.match(f)]): # also match monitor files like 0.1.monitor.csv # used to be uncommented, which means do not go deeper than current directory if any of the data files # are found # dirs[:] = [] result = {'dirname' : dirname} if "metadata.json" in files: with open(osp.join(dirname, "metadata.json"), "r") as fh: result['metadata'] = json.load(fh) progjson = osp.join(dirname, "progress.json") progcsv = osp.join(dirname, "progress.csv") if enable_progress: if osp.exists(progjson): result['progress'] = pandas.DataFrame(read_json(progjson)) elif osp.exists(progcsv): try: result['progress'] = read_csv(progcsv) except pandas.errors.EmptyDataError: print('skipping progress file in ', dirname, 'empty data') else: if verbose: print('skipping %s: no progress file'%dirname) if enable_monitor: try: result['monitor'] = pandas.DataFrame(monitor.load_results(dirname)) except monitor.LoadMonitorResultsError: print('skipping %s: no monitor files'%dirname) except Exception as e: print('exception loading monitor file in %s: %s'%(dirname, e)) if result.get('monitor') is not None or result.get('progress') is not None: allresults.append(Result(**result)) if verbose: print('successfully loaded %s'%dirname) if verbose: print('loaded %i results'%len(allresults)) return allresults COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink', 'brown', 'orange', 'teal', 'lightblue', 'lime', 'lavender', 'turquoise', 'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue'] def default_xy_fn(r): x = np.cumsum(r.monitor.l) y = smooth(r.monitor.r, radius=10) return x,y def default_split_fn(r): import re # match name between slash and -<digits> at the end of the string # (slash in the beginning or -<digits> in the end or either may be missing) match = re.search(r'[^/-]+(?=(-\d+)?\Z)', r.dirname) if match: return match.group(0) def plot_results( allresults, *, xy_fn=default_xy_fn, split_fn=default_split_fn, group_fn=default_split_fn, average_group=False, shaded_std=True, shaded_err=True, figsize=None, legend_outside=False, resample=0, smooth_step=1.0, tiling='vertical', xlabel=None, ylabel=None ): ''' Plot multiple Results objects xy_fn: function Result -> x,y - function that converts results objects into tuple of x and y values. By default, x is cumsum of episode lengths, and y is episode rewards split_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by. 
That is, the results r for which split_fn(r) is different will be put on different sub-panels. By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are stacked vertically in the figure. group_fn: function Result -> hashable - function that converts results objects into keys to group curves by. That is, the results r for which group_fn(r) is the same will be put into the same group. Curves in the same group have the same color (if average_group is False), or averaged over (if average_group is True). The default value is the same as default value for split_fn average_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling (if resample = 0, will use 512 steps) shaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be shown (only applicable if average_group = True) shaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves (that is, standard deviation divided by square root of number of curves) will be shown (only applicable if average_group = True) figsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of sub-panels. legend_outside: bool - if True, will place the legend outside of the sub-panels. resample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric EMA smoothing (see the docstring for symmetric_ema). Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default value is 512. smooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step). See docstrings for decay_steps in symmetric_ema or one_sided_ema functions. ''' if split_fn is None: split_fn = lambda _ : '' if group_fn is None: group_fn = lambda _ : '' sk2r = defaultdict(list) # splitkey2results for result in allresults: splitkey = split_fn(result) sk2r[splitkey].append(result) assert len(sk2r) > 0 assert isinstance(resample, int), "0: don't resample. 
<integer>: that many samples" if tiling == 'vertical' or tiling is None: nrows = len(sk2r) ncols = 1 elif tiling == 'horizontal': ncols = len(sk2r) nrows = 1 elif tiling == 'symmetric': import math N = len(sk2r) largest_divisor = 1 for i in range(1, int(math.sqrt(N))+1): if N % i == 0: largest_divisor = i ncols = largest_divisor nrows = N // ncols figsize = figsize or (6 * ncols, 6 * nrows) f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False, figsize=figsize) groups = list(set(group_fn(result) for result in allresults)) default_samples = 512 if average_group: resample = resample or default_samples for (isplit, sk) in enumerate(sorted(sk2r.keys())): g2l = {} g2c = defaultdict(int) sresults = sk2r[sk] gresults = defaultdict(list) idx_row = isplit // ncols idx_col = isplit % ncols ax = axarr[idx_row][idx_col] for result in sresults: group = group_fn(result) g2c[group] += 1 x, y = xy_fn(result) if x is None: x = np.arange(len(y)) x, y = map(np.asarray, (x, y)) if average_group: gresults[group].append((x,y)) else: if resample: x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step) l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)]) g2l[group] = l if average_group: for group in sorted(groups): xys = gresults[group] if not any(xys): continue color = COLORS[groups.index(group) % len(COLORS)] origxs = [xy[0] for xy in xys] minxlen = min(map(len, origxs)) def allequal(qs): return all((q==qs[0]).all() for q in qs[1:]) if resample: low = max(x[0] for x in origxs) high = min(x[-1] for x in origxs) usex = np.linspace(low, high, resample) ys = [] for (x, y) in xys: ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1]) else: assert allequal([x[:minxlen] for x in origxs]),\ 'If you want to average unevenly sampled data, set resample=<number of samples you want>' usex = origxs[0] ys = [xy[1][:minxlen] for xy in xys] ymean = np.mean(ys, axis=0) ystd = np.std(ys, axis=0) ystderr = ystd / np.sqrt(len(ys)) l, = axarr[idx_row][idx_col].plot(usex, ymean, color=color) g2l[group] = l if shaded_err: ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4) if shaded_std: ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2) # https://matplotlib.org/users/legend_guide.html plt.tight_layout() if any(g2l.keys()): ax.legend( g2l.values(), ['%s (%i)'%(g, g2c[g]) for g in g2l] if average_group else g2l.keys(), loc=2 if legend_outside else None, bbox_to_anchor=(1,1) if legend_outside else None) ax.set_title(sk) # add xlabels, but only to the bottom row if xlabel is not None: for ax in axarr[-1]: plt.sca(ax) plt.xlabel(xlabel) # add ylabels, but only to left column if ylabel is not None: for ax in axarr[:,0]: plt.sca(ax) plt.ylabel(ylabel) return f, axarr def regression_analysis(df): xcols = list(df.columns.copy()) xcols.remove('score') ycols = ['score'] import statsmodels.api as sm mod = sm.OLS(df[ycols], sm.add_constant(df[xcols]), hasconst=False) res = mod.fit() print(res.summary()) def test_smooth(): norig = 100 nup = 300 ndown = 30 xs = np.cumsum(np.random.rand(norig) * 10 / norig) yclean = np.sin(xs) ys = yclean + .1 * np.random.randn(yclean.size) xup, yup, _ = symmetric_ema(xs, ys, xs.min(), xs.max(), nup, decay_steps=nup/ndown) xdown, ydown, _ = symmetric_ema(xs, ys, xs.min(), xs.max(), ndown, decay_steps=ndown/ndown) xsame, ysame, _ = symmetric_ema(xs, ys, xs.min(), xs.max(), norig, decay_steps=norig/ndown) plt.plot(xs, ys, label='orig', marker='x') plt.plot(xup, yup, label='up', marker='x') 
plt.plot(xdown, ydown, label='down', marker='x') plt.plot(xsame, ysame, label='same', marker='x') plt.plot(xs, yclean, label='clean', marker='x') plt.legend() plt.show()
18,930
42.51954
174
py
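A usage sketch for load_results/plot_results above; the log directory is a placeholder for wherever monitor.csv/progress.csv files were written during training:

# load training runs recursively and plot smoothed episode-reward curves
import matplotlib.pyplot as plt
from baselines.common import plot_util as pu

results = pu.load_results('~/logs/cartpole-runs')   # placeholder path
fig, axes = pu.plot_results(
    results,
    average_group=True,     # average curves sharing a group key, with shaded std/stderr bands
    shaded_std=True,
    xlabel='timesteps',
    ylabel='episode reward',
)
plt.show()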
baselines
baselines-master/baselines/common/tests/test_env_after_learn.py
import pytest import gym import tensorflow as tf from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv from baselines.run import get_learn_function from baselines.common.tf_util import make_session algos = ['a2c', 'acer', 'acktr', 'deepq', 'ppo2', 'trpo_mpi'] @pytest.mark.parametrize('algo', algos) def test_env_after_learn(algo): def make_env(): # acktr requires too much RAM, fails on travis env = gym.make('CartPole-v1' if algo == 'acktr' else 'PongNoFrameskip-v4') return env make_session(make_default=True, graph=tf.Graph()) env = SubprocVecEnv([make_env]) learn = get_learn_function(algo) # Commenting out the following line resolves the issue, though crash happens at env.reset(). learn(network='mlp', env=env, total_timesteps=0, load_path=None, seed=None) env.reset() env.close()
865
29.928571
96
py
baselines
baselines-master/baselines/common/tests/test_fetchreach.py
import pytest import gym from baselines.run import get_learn_function from baselines.common.tests.util import reward_per_episode_test from baselines.common.tests import mark_slow pytest.importorskip('mujoco_py') common_kwargs = dict( network='mlp', seed=0, ) learn_kwargs = { 'her': dict(total_timesteps=2000) } @mark_slow @pytest.mark.parametrize("alg", learn_kwargs.keys()) def test_fetchreach(alg): ''' Test if the algorithm (with an mlp policy) can learn the FetchReach task ''' kwargs = common_kwargs.copy() kwargs.update(learn_kwargs[alg]) learn_fn = lambda e: get_learn_function(alg)(env=e, **kwargs) def env_fn(): env = gym.make('FetchReach-v1') env.seed(0) return env reward_per_episode_test(env_fn, learn_fn, -15) if __name__ == '__main__': test_fetchreach('her')
860
20
65
py
baselines
baselines-master/baselines/common/tests/test_with_mpi.py
import os import sys import subprocess import cloudpickle import base64 import pytest from functools import wraps try: from mpi4py import MPI except ImportError: MPI = None def with_mpi(nproc=2, timeout=30, skip_if_no_mpi=True): def outer_thunk(fn): @wraps(fn) def thunk(*args, **kwargs): serialized_fn = base64.b64encode(cloudpickle.dumps(lambda: fn(*args, **kwargs))) subprocess.check_call([ 'mpiexec','-n', str(nproc), sys.executable, '-m', 'baselines.common.tests.test_with_mpi', serialized_fn ], env=os.environ, timeout=timeout) if skip_if_no_mpi: return pytest.mark.skipif(MPI is None, reason="MPI not present")(thunk) else: return thunk return outer_thunk if __name__ == '__main__': if len(sys.argv) > 1: fn = cloudpickle.loads(base64.b64decode(sys.argv[1])) assert callable(fn) fn()
997
24.589744
92
py
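The with_mpi decorator above re-launches the wrapped function under mpiexec; a sketch of a test using it (the test body is invented):

# run the body of a pytest test on 2 MPI ranks; skipped automatically if mpi4py is missing
from baselines.common.tests.test_with_mpi import with_mpi

@with_mpi(nproc=2, timeout=30)
def test_allreduce_sum():
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    assert comm.allreduce(comm.Get_rank() + 1) == 3   # the two ranks contribute 1 and 2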
baselines
baselines-master/baselines/common/tests/test_tf_util.py
# tests for tf_util import tensorflow as tf from baselines.common.tf_util import ( function, initialize, single_threaded_session ) def test_function(): with tf.Graph().as_default(): x = tf.placeholder(tf.int32, (), name="x") y = tf.placeholder(tf.int32, (), name="y") z = 3 * x + 2 * y lin = function([x, y], z, givens={y: 0}) with single_threaded_session(): initialize() assert lin(2) == 6 assert lin(x=3) == 9 assert lin(2, 2) == 10 assert lin(x=2, y=3) == 12 def test_multikwargs(): with tf.Graph().as_default(): x = tf.placeholder(tf.int32, (), name="x") with tf.variable_scope("other"): x2 = tf.placeholder(tf.int32, (), name="x") z = 3 * x + 2 * x2 lin = function([x, x2], z, givens={x2: 0}) with single_threaded_session(): initialize() assert lin(2) == 6 assert lin(2, 2) == 10 if __name__ == '__main__': test_function() test_multikwargs()
1,072
23.953488
55
py
baselines
baselines-master/baselines/common/tests/test_schedules.py
import numpy as np from baselines.common.schedules import ConstantSchedule, PiecewiseSchedule def test_piecewise_schedule(): ps = PiecewiseSchedule([(-5, 100), (5, 200), (10, 50), (100, 50), (200, -50)], outside_value=500) assert np.isclose(ps.value(-10), 500) assert np.isclose(ps.value(0), 150) assert np.isclose(ps.value(5), 200) assert np.isclose(ps.value(9), 80) assert np.isclose(ps.value(50), 50) assert np.isclose(ps.value(80), 50) assert np.isclose(ps.value(150), 0) assert np.isclose(ps.value(175), -25) assert np.isclose(ps.value(201), 500) assert np.isclose(ps.value(500), 500) assert np.isclose(ps.value(200 - 1e-10), -50) def test_constant_schedule(): cs = ConstantSchedule(5) for i in range(-100, 100): assert np.isclose(cs.value(i), 5)
823
29.518519
101
py
baselines
baselines-master/baselines/common/tests/test_identity.py
import pytest from baselines.common.tests.envs.identity_env import DiscreteIdentityEnv, BoxIdentityEnv, MultiDiscreteIdentityEnv from baselines.run import get_learn_function from baselines.common.tests.util import simple_test from baselines.common.tests import mark_slow common_kwargs = dict( total_timesteps=30000, network='mlp', gamma=0.9, seed=0, ) learn_kwargs = { 'a2c' : {}, 'acktr': {}, 'deepq': {}, 'ddpg': dict(layer_norm=True), 'ppo2': dict(lr=1e-3, nsteps=64, ent_coef=0.0), 'trpo_mpi': dict(timesteps_per_batch=100, cg_iters=10, gamma=0.9, lam=1.0, max_kl=0.01) } algos_disc = ['a2c', 'acktr', 'deepq', 'ppo2', 'trpo_mpi'] algos_multidisc = ['a2c', 'acktr', 'ppo2', 'trpo_mpi'] algos_cont = ['a2c', 'acktr', 'ddpg', 'ppo2', 'trpo_mpi'] @mark_slow @pytest.mark.parametrize("alg", algos_disc) def test_discrete_identity(alg): ''' Test if the algorithm (with an mlp policy) can learn an identity transformation (i.e. return observation as an action) ''' kwargs = learn_kwargs[alg] kwargs.update(common_kwargs) learn_fn = lambda e: get_learn_function(alg)(env=e, **kwargs) env_fn = lambda: DiscreteIdentityEnv(10, episode_len=100) simple_test(env_fn, learn_fn, 0.9) @mark_slow @pytest.mark.parametrize("alg", algos_multidisc) def test_multidiscrete_identity(alg): ''' Test if the algorithm (with an mlp policy) can learn an identity transformation (i.e. return observation as an action) ''' kwargs = learn_kwargs[alg] kwargs.update(common_kwargs) learn_fn = lambda e: get_learn_function(alg)(env=e, **kwargs) env_fn = lambda: MultiDiscreteIdentityEnv((3,3), episode_len=100) simple_test(env_fn, learn_fn, 0.9) @mark_slow @pytest.mark.parametrize("alg", algos_cont) def test_continuous_identity(alg): ''' Test if the algorithm (with an mlp policy) can learn an identity transformation (i.e. return observation as an action) to a required precision ''' kwargs = learn_kwargs[alg] kwargs.update(common_kwargs) learn_fn = lambda e: get_learn_function(alg)(env=e, **kwargs) env_fn = lambda: BoxIdentityEnv((1,), episode_len=100) simple_test(env_fn, learn_fn, -0.1) if __name__ == '__main__': test_multidiscrete_identity('acktr')
2,304
28.935065
114
py
baselines
baselines-master/baselines/common/tests/test_segment_tree.py
import numpy as np from baselines.common.segment_tree import SumSegmentTree, MinSegmentTree def test_tree_set(): tree = SumSegmentTree(4) tree[2] = 1.0 tree[3] = 3.0 assert np.isclose(tree.sum(), 4.0) assert np.isclose(tree.sum(0, 2), 0.0) assert np.isclose(tree.sum(0, 3), 1.0) assert np.isclose(tree.sum(2, 3), 1.0) assert np.isclose(tree.sum(2, -1), 1.0) assert np.isclose(tree.sum(2, 4), 4.0) def test_tree_set_overlap(): tree = SumSegmentTree(4) tree[2] = 1.0 tree[2] = 3.0 assert np.isclose(tree.sum(), 3.0) assert np.isclose(tree.sum(2, 3), 3.0) assert np.isclose(tree.sum(2, -1), 3.0) assert np.isclose(tree.sum(2, 4), 3.0) assert np.isclose(tree.sum(1, 2), 0.0) def test_prefixsum_idx(): tree = SumSegmentTree(4) tree[2] = 1.0 tree[3] = 3.0 assert tree.find_prefixsum_idx(0.0) == 2 assert tree.find_prefixsum_idx(0.5) == 2 assert tree.find_prefixsum_idx(0.99) == 2 assert tree.find_prefixsum_idx(1.01) == 3 assert tree.find_prefixsum_idx(3.00) == 3 assert tree.find_prefixsum_idx(4.00) == 3 def test_prefixsum_idx2(): tree = SumSegmentTree(4) tree[0] = 0.5 tree[1] = 1.0 tree[2] = 1.0 tree[3] = 3.0 assert tree.find_prefixsum_idx(0.00) == 0 assert tree.find_prefixsum_idx(0.55) == 1 assert tree.find_prefixsum_idx(0.99) == 1 assert tree.find_prefixsum_idx(1.51) == 2 assert tree.find_prefixsum_idx(3.00) == 3 assert tree.find_prefixsum_idx(5.50) == 3 def test_max_interval_tree(): tree = MinSegmentTree(4) tree[0] = 1.0 tree[2] = 0.5 tree[3] = 3.0 assert np.isclose(tree.min(), 0.5) assert np.isclose(tree.min(0, 2), 1.0) assert np.isclose(tree.min(0, 3), 0.5) assert np.isclose(tree.min(0, -1), 0.5) assert np.isclose(tree.min(2, 4), 0.5) assert np.isclose(tree.min(3, 4), 3.0) tree[2] = 0.7 assert np.isclose(tree.min(), 0.7) assert np.isclose(tree.min(0, 2), 1.0) assert np.isclose(tree.min(0, 3), 0.7) assert np.isclose(tree.min(0, -1), 0.7) assert np.isclose(tree.min(2, 4), 0.7) assert np.isclose(tree.min(3, 4), 3.0) tree[2] = 4.0 assert np.isclose(tree.min(), 1.0) assert np.isclose(tree.min(0, 2), 1.0) assert np.isclose(tree.min(0, 3), 1.0) assert np.isclose(tree.min(0, -1), 1.0) assert np.isclose(tree.min(2, 4), 3.0) assert np.isclose(tree.min(2, 3), 4.0) assert np.isclose(tree.min(2, -1), 4.0) assert np.isclose(tree.min(3, 4), 3.0) if __name__ == '__main__': test_tree_set() test_tree_set_overlap() test_prefixsum_idx() test_prefixsum_idx2() test_max_interval_tree()
2,691
24.884615
72
py
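The SumSegmentTree/MinSegmentTree exercised by the tests above are the machinery behind prioritized replay sampling; a sketch, not from the repo, of proportional sampling with find_prefixsum_idx (the priority values are invented):

# sample indices with probability proportional to their priority
import numpy as np
from baselines.common.segment_tree import SumSegmentTree

capacity = 8                      # assumed: capacity must be a power of two
tree = SumSegmentTree(capacity)
priorities = [0.1, 0.5, 0.2, 1.0, 0.05, 0.0, 0.3, 0.85]
for i, p in enumerate(priorities):
    tree[i] = p

samples = [tree.find_prefixsum_idx(np.random.uniform(0, tree.sum())) for _ in range(5)]
print(samples)                    # indices drawn roughly in proportion to their priorities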
baselines
baselines-master/baselines/common/tests/test_mnist.py
import pytest # from baselines.acer import acer_simple as acer from baselines.common.tests.envs.mnist_env import MnistEnv from baselines.common.tests.util import simple_test from baselines.run import get_learn_function from baselines.common.tests import mark_slow # TODO investigate a2c and ppo2 failures - is it due to bad hyperparameters for this problem? # GitHub issue https://github.com/openai/baselines/issues/189 common_kwargs = { 'seed': 0, 'network':'cnn', 'gamma':0.9, 'pad':'SAME' } learn_args = { 'a2c': dict(total_timesteps=50000), 'acer': dict(total_timesteps=20000), 'deepq': dict(total_timesteps=5000), 'acktr': dict(total_timesteps=30000), 'ppo2': dict(total_timesteps=50000, lr=1e-3, nsteps=128, ent_coef=0.0), 'trpo_mpi': dict(total_timesteps=80000, timesteps_per_batch=100, cg_iters=10, lam=1.0, max_kl=0.001) } #tests pass, but are too slow on travis. Same algorithms are covered # by other tests with less compute-hungry nn's and by benchmarks @pytest.mark.skip @mark_slow @pytest.mark.parametrize("alg", learn_args.keys()) def test_mnist(alg): ''' Test if the algorithm can learn to classify MNIST digits. Uses CNN policy. ''' learn_kwargs = learn_args[alg] learn_kwargs.update(common_kwargs) learn = get_learn_function(alg) learn_fn = lambda e: learn(env=e, **learn_kwargs) env_fn = lambda: MnistEnv(episode_len=100) simple_test(env_fn, learn_fn, 0.6) if __name__ == '__main__': test_mnist('acer')
1,515
29.32
104
py
baselines
baselines-master/baselines/common/tests/util.py
import tensorflow as tf import numpy as np from baselines.common.vec_env.dummy_vec_env import DummyVecEnv N_TRIALS = 10000 N_EPISODES = 100 _sess_config = tf.ConfigProto( allow_soft_placement=True, intra_op_parallelism_threads=1, inter_op_parallelism_threads=1 ) def simple_test(env_fn, learn_fn, min_reward_fraction, n_trials=N_TRIALS): def seeded_env_fn(): env = env_fn() env.seed(0) return env np.random.seed(0) env = DummyVecEnv([seeded_env_fn]) with tf.Graph().as_default(), tf.Session(config=_sess_config).as_default(): tf.set_random_seed(0) model = learn_fn(env) sum_rew = 0 done = True for i in range(n_trials): if done: obs = env.reset() state = model.initial_state if state is not None: a, v, state, _ = model.step(obs, S=state, M=[False]) else: a, v, _, _ = model.step(obs) obs, rew, done, _ = env.step(a) sum_rew += float(rew) print("Reward in {} trials is {}".format(n_trials, sum_rew)) assert sum_rew > min_reward_fraction * n_trials, \ 'sum of rewards {} is less than {} of the total number of trials {}'.format(sum_rew, min_reward_fraction, n_trials) def reward_per_episode_test(env_fn, learn_fn, min_avg_reward, n_trials=N_EPISODES): env = DummyVecEnv([env_fn]) with tf.Graph().as_default(), tf.Session(config=_sess_config).as_default(): model = learn_fn(env) N_TRIALS = 100 observations, actions, rewards = rollout(env, model, N_TRIALS) rewards = [sum(r) for r in rewards] avg_rew = sum(rewards) / N_TRIALS print("Average reward in {} episodes is {}".format(n_trials, avg_rew)) assert avg_rew > min_avg_reward, \ 'average reward in {} episodes ({}) is less than {}'.format(n_trials, avg_rew, min_avg_reward) def rollout(env, model, n_trials): rewards = [] actions = [] observations = [] for i in range(n_trials): obs = env.reset() state = model.initial_state if hasattr(model, 'initial_state') else None episode_rew = [] episode_actions = [] episode_obs = [] while True: if state is not None: a, v, state, _ = model.step(obs, S=state, M=[False]) else: a,v, _, _ = model.step(obs) obs, rew, done, _ = env.step(a) episode_rew.append(rew) episode_actions.append(a) episode_obs.append(obs) if done: break rewards.append(episode_rew) actions.append(episode_actions) observations.append(episode_obs) return observations, actions, rewards def smoketest(argstr, **kwargs): import tempfile import subprocess import os argstr = 'python -m baselines.run ' + argstr for key, value in kwargs: argstr += ' --{}={}'.format(key, value) tempdir = tempfile.mkdtemp() env = os.environ.copy() env['OPENAI_LOGDIR'] = tempdir subprocess.run(argstr.split(' '), env=env) return tempdir
3,181
33.215054
127
py
baselines
baselines-master/baselines/common/tests/test_plot_util.py
# smoke tests of plot_util from baselines.common import plot_util as pu from baselines.common.tests.util import smoketest def test_plot_util(): nruns = 4 logdirs = [smoketest('--alg=ppo2 --env=CartPole-v0 --num_timesteps=10000') for _ in range(nruns)] data = pu.load_results(logdirs) assert len(data) == 4 _, axes = pu.plot_results(data[:1]); assert len(axes) == 1 _, axes = pu.plot_results(data, tiling='vertical'); assert axes.shape==(4,1) _, axes = pu.plot_results(data, tiling='horizontal'); assert axes.shape==(1,4) _, axes = pu.plot_results(data, tiling='symmetric'); assert axes.shape==(2,2) _, axes = pu.plot_results(data, split_fn=lambda _: ''); assert len(axes) == 1
717
38.888889
101
py
baselines
baselines-master/baselines/common/tests/__init__.py
import os, pytest mark_slow = pytest.mark.skipif(not os.getenv('RUNSLOW'), reason='slow')
89
44
71
py
baselines
baselines-master/baselines/common/tests/test_doc_examples.py
import pytest try: import mujoco_py _mujoco_present = True except BaseException: mujoco_py = None _mujoco_present = False @pytest.mark.skipif( not _mujoco_present, reason='error loading mujoco - either mujoco / mujoco key not present, or LD_LIBRARY_PATH is not pointing to mujoco library' ) def test_lstm_example(): import tensorflow as tf from baselines.common import policies, models, cmd_util from baselines.common.vec_env.dummy_vec_env import DummyVecEnv # create vectorized environment venv = DummyVecEnv([lambda: cmd_util.make_mujoco_env('Reacher-v2', seed=0)]) with tf.Session() as sess: # build policy based on lstm network with 128 units policy = policies.build_policy(venv, models.lstm(128))(nbatch=1, nsteps=1) # initialize tensorflow variables sess.run(tf.global_variables_initializer()) # prepare environment variables ob = venv.reset() state = policy.initial_state done = [False] step_counter = 0 # run a single episode until the end (i.e. until done) while True: action, _, state, _ = policy.step(ob, S=state, M=done) ob, reward, done, _ = venv.step(action) step_counter += 1 if done: break assert step_counter > 5
1,351
26.591837
128
py
baselines
baselines-master/baselines/common/tests/test_serialization.py
import os import gym import tempfile import pytest import tensorflow as tf import numpy as np from baselines.common.tests.envs.mnist_env import MnistEnv from baselines.common.vec_env.dummy_vec_env import DummyVecEnv from baselines.run import get_learn_function from baselines.common.tf_util import make_session, get_session from functools import partial learn_kwargs = { 'deepq': {}, 'a2c': {}, 'acktr': {}, 'acer': {}, 'ppo2': {'nminibatches': 1, 'nsteps': 10}, 'trpo_mpi': {}, } network_kwargs = { 'mlp': {}, 'cnn': {'pad': 'SAME'}, 'lstm': {}, 'cnn_lnlstm': {'pad': 'SAME'} } @pytest.mark.parametrize("learn_fn", learn_kwargs.keys()) @pytest.mark.parametrize("network_fn", network_kwargs.keys()) def test_serialization(learn_fn, network_fn): ''' Test if the trained model can be serialized ''' if network_fn.endswith('lstm') and learn_fn in ['acer', 'acktr', 'trpo_mpi', 'deepq']: # TODO make acktr work with recurrent policies # and test # github issue: https://github.com/openai/baselines/issues/660 return def make_env(): env = MnistEnv(episode_len=100) env.seed(10) return env env = DummyVecEnv([make_env]) ob = env.reset().copy() learn = get_learn_function(learn_fn) kwargs = {} kwargs.update(network_kwargs[network_fn]) kwargs.update(learn_kwargs[learn_fn]) learn = partial(learn, env=env, network=network_fn, seed=0, **kwargs) with tempfile.TemporaryDirectory() as td: model_path = os.path.join(td, 'serialization_test_model') with tf.Graph().as_default(), make_session().as_default(): model = learn(total_timesteps=100) model.save(model_path) mean1, std1 = _get_action_stats(model, ob) variables_dict1 = _serialize_variables() with tf.Graph().as_default(), make_session().as_default(): model = learn(total_timesteps=0, load_path=model_path) mean2, std2 = _get_action_stats(model, ob) variables_dict2 = _serialize_variables() for k, v in variables_dict1.items(): np.testing.assert_allclose(v, variables_dict2[k], atol=0.01, err_msg='saved and loaded variable {} value mismatch'.format(k)) np.testing.assert_allclose(mean1, mean2, atol=0.5) np.testing.assert_allclose(std1, std2, atol=0.5) @pytest.mark.parametrize("learn_fn", learn_kwargs.keys()) @pytest.mark.parametrize("network_fn", ['mlp']) def test_coexistence(learn_fn, network_fn): ''' Test if more than one model can exist at a time ''' if learn_fn == 'deepq': # TODO enable multiple DQN models to be useable at the same time # github issue https://github.com/openai/baselines/issues/656 return if network_fn.endswith('lstm') and learn_fn in ['acktr', 'trpo_mpi', 'deepq']: # TODO make acktr work with recurrent policies # and test # github issue: https://github.com/openai/baselines/issues/660 return env = DummyVecEnv([lambda: gym.make('CartPole-v0')]) learn = get_learn_function(learn_fn) kwargs = {} kwargs.update(network_kwargs[network_fn]) kwargs.update(learn_kwargs[learn_fn]) learn = partial(learn, env=env, network=network_fn, total_timesteps=0, **kwargs) make_session(make_default=True, graph=tf.Graph()) model1 = learn(seed=1) make_session(make_default=True, graph=tf.Graph()) model2 = learn(seed=2) model1.step(env.observation_space.sample()) model2.step(env.observation_space.sample()) def _serialize_variables(): sess = get_session() variables = tf.trainable_variables() values = sess.run(variables) return {var.name: value for var, value in zip(variables, values)} def _get_action_stats(model, ob): ntrials = 1000 if model.initial_state is None or model.initial_state == []: actions = np.array([model.step(ob)[0] for _ in range(ntrials)]) else: actions = 
np.array([model.step(ob, S=model.initial_state, M=[False])[0] for _ in range(ntrials)]) mean = np.mean(actions, axis=0) std = np.std(actions, axis=0) return mean, std
4,273
29.528571
105
py
baselines
baselines-master/baselines/common/tests/test_cartpole.py
import pytest import gym from baselines.run import get_learn_function from baselines.common.tests.util import reward_per_episode_test from baselines.common.tests import mark_slow common_kwargs = dict( total_timesteps=30000, network='mlp', gamma=1.0, seed=0, ) learn_kwargs = { 'a2c' : dict(nsteps=32, value_network='copy', lr=0.05), 'acer': dict(value_network='copy'), 'acktr': dict(nsteps=32, value_network='copy', is_async=False), 'deepq': dict(total_timesteps=20000), 'ppo2': dict(value_network='copy'), 'trpo_mpi': {} } @mark_slow @pytest.mark.parametrize("alg", learn_kwargs.keys()) def test_cartpole(alg): ''' Test if the algorithm (with an mlp policy) can learn to balance the cartpole ''' kwargs = common_kwargs.copy() kwargs.update(learn_kwargs[alg]) learn_fn = lambda e: get_learn_function(alg)(env=e, **kwargs) def env_fn(): env = gym.make('CartPole-v0') env.seed(0) return env reward_per_episode_test(env_fn, learn_fn, 100) if __name__ == '__main__': test_cartpole('acer')
1,098
22.891304
67
py
baselines
baselines-master/baselines/common/tests/test_fixed_sequence.py
import pytest from baselines.common.tests.envs.fixed_sequence_env import FixedSequenceEnv from baselines.common.tests.util import simple_test from baselines.run import get_learn_function from baselines.common.tests import mark_slow common_kwargs = dict( seed=0, total_timesteps=50000, ) learn_kwargs = { 'a2c': {}, 'ppo2': dict(nsteps=10, ent_coef=0.0, nminibatches=1), # TODO enable sequential models for trpo_mpi (proper handling of nbatch and nsteps) # github issue: https://github.com/openai/baselines/issues/188 # 'trpo_mpi': lambda e, p: trpo_mpi.learn(policy_fn=p(env=e), env=e, max_timesteps=30000, timesteps_per_batch=100, cg_iters=10, gamma=0.9, lam=1.0, max_kl=0.001) } alg_list = learn_kwargs.keys() rnn_list = ['lstm'] @mark_slow @pytest.mark.parametrize("alg", alg_list) @pytest.mark.parametrize("rnn", rnn_list) def test_fixed_sequence(alg, rnn): ''' Test if the algorithm (with a given policy) can learn an identity transformation (i.e. return observation as an action) ''' kwargs = learn_kwargs[alg] kwargs.update(common_kwargs) env_fn = lambda: FixedSequenceEnv(n_actions=10, episode_len=5) learn = lambda e: get_learn_function(alg)( env=e, network=rnn, **kwargs ) simple_test(env_fn, learn, 0.7) if __name__ == '__main__': test_fixed_sequence('ppo2', 'lstm')
1,389
25.226415
165
py
baselines
baselines-master/baselines/common/tests/envs/mnist_env.py
import os.path as osp import numpy as np import tempfile from gym import Env from gym.spaces import Discrete, Box class MnistEnv(Env): def __init__( self, episode_len=None, no_images=None ): import filelock from tensorflow.examples.tutorials.mnist import input_data # we could use temporary directory for this with a context manager and # TemporaryDirecotry, but then each test that uses mnist would re-download the data # this way the data is not cleaned up, but we only download it once per machine mnist_path = osp.join(tempfile.gettempdir(), 'MNIST_data') with filelock.FileLock(mnist_path + '.lock'): self.mnist = input_data.read_data_sets(mnist_path) self.np_random = np.random.RandomState() self.observation_space = Box(low=0.0, high=1.0, shape=(28,28,1)) self.action_space = Discrete(10) self.episode_len = episode_len self.time = 0 self.no_images = no_images self.train_mode() self.reset() def reset(self): self._choose_next_state() self.time = 0 return self.state[0] def step(self, actions): rew = self._get_reward(actions) self._choose_next_state() done = False if self.episode_len and self.time >= self.episode_len: rew = 0 done = True return self.state[0], rew, done, {} def seed(self, seed=None): self.np_random.seed(seed) def train_mode(self): self.dataset = self.mnist.train def test_mode(self): self.dataset = self.mnist.test def _choose_next_state(self): max_index = (self.no_images if self.no_images is not None else self.dataset.num_examples) - 1 index = self.np_random.randint(0, max_index) image = self.dataset.images[index].reshape(28,28,1)*255 label = self.dataset.labels[index] self.state = (image, label) self.time += 1 def _get_reward(self, actions): return 1 if self.state[1] == actions else 0
2,110
28.319444
101
py
baselines
baselines-master/baselines/common/tests/envs/fixed_sequence_env.py
import numpy as np from gym import Env from gym.spaces import Discrete class FixedSequenceEnv(Env): def __init__( self, n_actions=10, episode_len=100 ): self.action_space = Discrete(n_actions) self.observation_space = Discrete(1) self.np_random = np.random.RandomState(0) self.episode_len = episode_len self.sequence = [self.np_random.randint(0, self.action_space.n) for _ in range(self.episode_len)] self.time = 0 def reset(self): self.time = 0 return 0 def step(self, actions): rew = self._get_reward(actions) self._choose_next_state() done = False if self.episode_len and self.time >= self.episode_len: done = True return 0, rew, done, {} def seed(self, seed=None): self.np_random.seed(seed) def _choose_next_state(self): self.time += 1 def _get_reward(self, actions): return 1 if actions == self.sequence[self.time] else 0
1,054
22.977273
71
py
baselines
baselines-master/baselines/common/tests/envs/identity_env.py
import numpy as np from abc import abstractmethod from gym import Env from gym.spaces import MultiDiscrete, Discrete, Box from collections import deque class IdentityEnv(Env): def __init__( self, episode_len=None, delay=0, zero_first_rewards=True ): self.observation_space = self.action_space self.episode_len = episode_len self.time = 0 self.delay = delay self.zero_first_rewards = zero_first_rewards self.q = deque(maxlen=delay+1) def reset(self): self.q.clear() for _ in range(self.delay + 1): self.q.append(self.action_space.sample()) self.time = 0 return self.q[-1] def step(self, actions): rew = self._get_reward(self.q.popleft(), actions) if self.zero_first_rewards and self.time < self.delay: rew = 0 self.q.append(self.action_space.sample()) self.time += 1 done = self.episode_len is not None and self.time >= self.episode_len return self.q[-1], rew, done, {} def seed(self, seed=None): self.action_space.seed(seed) @abstractmethod def _get_reward(self, state, actions): raise NotImplementedError class DiscreteIdentityEnv(IdentityEnv): def __init__( self, dim, episode_len=None, delay=0, zero_first_rewards=True ): self.action_space = Discrete(dim) super().__init__(episode_len=episode_len, delay=delay, zero_first_rewards=zero_first_rewards) def _get_reward(self, state, actions): return 1 if state == actions else 0 class MultiDiscreteIdentityEnv(IdentityEnv): def __init__( self, dims, episode_len=None, delay=0, ): self.action_space = MultiDiscrete(dims) super().__init__(episode_len=episode_len, delay=delay) def _get_reward(self, state, actions): return 1 if all(state == actions) else 0 class BoxIdentityEnv(IdentityEnv): def __init__( self, shape, episode_len=None, ): self.action_space = Box(low=-1.0, high=1.0, shape=shape, dtype=np.float32) super().__init__(episode_len=episode_len) def _get_reward(self, state, actions): diff = actions - state diff = diff[:] return -0.5 * np.dot(diff, diff)
2,444
25.868132
101
py
baselines
baselines-master/baselines/common/tests/envs/__init__.py
0
0
0
py
baselines
baselines-master/baselines/common/tests/envs/identity_env_test.py
from baselines.common.tests.envs.identity_env import DiscreteIdentityEnv def test_discrete_nodelay(): nsteps = 100 eplen = 50 env = DiscreteIdentityEnv(10, episode_len=eplen) ob = env.reset() for t in range(nsteps): action = env.action_space.sample() next_ob, rew, done, info = env.step(action) assert rew == (1 if action == ob else 0) if (t + 1) % eplen == 0: assert done next_ob = env.reset() else: assert not done ob = next_ob def test_discrete_delay1(): eplen = 50 env = DiscreteIdentityEnv(10, episode_len=eplen, delay=1) ob = env.reset() prev_ob = None for t in range(eplen): action = env.action_space.sample() next_ob, rew, done, info = env.step(action) if t > 0: assert rew == (1 if action == prev_ob else 0) else: assert rew == 0 prev_ob = ob ob = next_ob if t < eplen - 1: assert not done assert done
1,034
26.972973
72
py
baselines
baselines-master/baselines/common/vec_env/vec_video_recorder.py
import os from baselines import logger from baselines.common.vec_env import VecEnvWrapper from gym.wrappers.monitoring import video_recorder class VecVideoRecorder(VecEnvWrapper): """ Wrap VecEnv to record rendered image as mp4 video. """ def __init__(self, venv, directory, record_video_trigger, video_length=200): """ # Arguments venv: VecEnv to wrap directory: Where to save videos record_video_trigger: Function that defines when to start recording. The function takes the current number of step, and returns whether we should start recording or not. video_length: Length of recorded video """ VecEnvWrapper.__init__(self, venv) self.record_video_trigger = record_video_trigger self.video_recorder = None self.directory = os.path.abspath(directory) if not os.path.exists(self.directory): os.mkdir(self.directory) self.file_prefix = "vecenv" self.file_infix = '{}'.format(os.getpid()) self.step_id = 0 self.video_length = video_length self.recording = False self.recorded_frames = 0 def reset(self): obs = self.venv.reset() self.start_video_recorder() return obs def start_video_recorder(self): self.close_video_recorder() base_path = os.path.join(self.directory, '{}.video.{}.video{:06}'.format(self.file_prefix, self.file_infix, self.step_id)) self.video_recorder = video_recorder.VideoRecorder( env=self.venv, base_path=base_path, metadata={'step_id': self.step_id} ) self.video_recorder.capture_frame() self.recorded_frames = 1 self.recording = True def _video_enabled(self): return self.record_video_trigger(self.step_id) def step_wait(self): obs, rews, dones, infos = self.venv.step_wait() self.step_id += 1 if self.recording: self.video_recorder.capture_frame() self.recorded_frames += 1 if self.recorded_frames > self.video_length: logger.info("Saving video to ", self.video_recorder.path) self.close_video_recorder() elif self._video_enabled(): self.start_video_recorder() return obs, rews, dones, infos def close_video_recorder(self): if self.recording: self.video_recorder.close() self.recording = False self.recorded_frames = 0 def close(self): VecEnvWrapper.close(self) self.close_video_recorder() def __del__(self): self.close()
2,746
29.522222
130
py
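A sketch of wrapping a vectorized env with VecVideoRecorder above; the directory and trigger are illustrative, and the base env must support rgb_array rendering:

# record a 200-frame clip starting at every 1000th step of a vectorized CartPole
import gym
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.common.vec_env.vec_video_recorder import VecVideoRecorder

venv = DummyVecEnv([lambda: gym.make('CartPole-v0')])
venv = VecVideoRecorder(venv, directory='/tmp/vec_videos',
                        record_video_trigger=lambda step: step % 1000 == 0,
                        video_length=200)
obs = venv.reset()                                    # starts the first recording
for _ in range(2000):
    obs, rews, dones, infos = venv.step([venv.action_space.sample()])
venv.close()                                          # flushes any open recording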
baselines
baselines-master/baselines/common/vec_env/vec_normalize.py
from . import VecEnvWrapper import numpy as np class VecNormalize(VecEnvWrapper): """ A vectorized wrapper that normalizes the observations and returns from an environment. """ def __init__(self, venv, ob=True, ret=True, clipob=10., cliprew=10., gamma=0.99, epsilon=1e-8, use_tf=False): VecEnvWrapper.__init__(self, venv) if use_tf: from baselines.common.running_mean_std import TfRunningMeanStd self.ob_rms = TfRunningMeanStd(shape=self.observation_space.shape, scope='ob_rms') if ob else None self.ret_rms = TfRunningMeanStd(shape=(), scope='ret_rms') if ret else None else: from baselines.common.running_mean_std import RunningMeanStd self.ob_rms = RunningMeanStd(shape=self.observation_space.shape) if ob else None self.ret_rms = RunningMeanStd(shape=()) if ret else None self.clipob = clipob self.cliprew = cliprew self.ret = np.zeros(self.num_envs) self.gamma = gamma self.epsilon = epsilon def step_wait(self): obs, rews, news, infos = self.venv.step_wait() self.ret = self.ret * self.gamma + rews obs = self._obfilt(obs) if self.ret_rms: self.ret_rms.update(self.ret) rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew) self.ret[news] = 0. return obs, rews, news, infos def _obfilt(self, obs): if self.ob_rms: self.ob_rms.update(obs) obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob) return obs else: return obs def reset(self): self.ret = np.zeros(self.num_envs) obs = self.venv.reset() return self._obfilt(obs)
1,854
37.645833
120
py
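A sketch of VecNormalize above on the same kind of vectorized env; the keyword values shown are simply its defaults:

# normalize observations and discounted returns with running statistics
import gym
import numpy as np
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.common.vec_env.vec_normalize import VecNormalize

venv = DummyVecEnv([lambda: gym.make('CartPole-v0')])
venv = VecNormalize(venv, ob=True, ret=True, clipob=10., cliprew=10., gamma=0.99)

obs = venv.reset()                                    # normalized, clipped observations
for _ in range(100):
    obs, rews, dones, infos = venv.step(np.array([venv.action_space.sample()]))
print(venv.ob_rms.mean, venv.ob_rms.var)              # running observation statistics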
baselines
baselines-master/baselines/common/vec_env/test_vec_env.py
""" Tests for asynchronous vectorized environments. """ import gym import numpy as np import pytest from .dummy_vec_env import DummyVecEnv from .shmem_vec_env import ShmemVecEnv from .subproc_vec_env import SubprocVecEnv from baselines.common.tests.test_with_mpi import with_mpi def assert_venvs_equal(venv1, venv2, num_steps): """ Compare two environments over num_steps steps and make sure that the observations produced by each are the same when given the same actions. """ assert venv1.num_envs == venv2.num_envs assert venv1.observation_space.shape == venv2.observation_space.shape assert venv1.observation_space.dtype == venv2.observation_space.dtype assert venv1.action_space.shape == venv2.action_space.shape assert venv1.action_space.dtype == venv2.action_space.dtype try: obs1, obs2 = venv1.reset(), venv2.reset() assert np.array(obs1).shape == np.array(obs2).shape assert np.array(obs1).shape == (venv1.num_envs,) + venv1.observation_space.shape assert np.allclose(obs1, obs2) venv1.action_space.seed(1337) for _ in range(num_steps): actions = np.array([venv1.action_space.sample() for _ in range(venv1.num_envs)]) for venv in [venv1, venv2]: venv.step_async(actions) outs1 = venv1.step_wait() outs2 = venv2.step_wait() for out1, out2 in zip(outs1[:3], outs2[:3]): assert np.array(out1).shape == np.array(out2).shape assert np.allclose(out1, out2) assert list(outs1[3]) == list(outs2[3]) finally: venv1.close() venv2.close() @pytest.mark.parametrize('klass', (ShmemVecEnv, SubprocVecEnv)) @pytest.mark.parametrize('dtype', ('uint8', 'float32')) def test_vec_env(klass, dtype): # pylint: disable=R0914 """ Test that a vectorized environment is equivalent to DummyVecEnv, since DummyVecEnv is less likely to be error prone. """ num_envs = 3 num_steps = 100 shape = (3, 8) def make_fn(seed): """ Get an environment constructor with a seed. """ return lambda: SimpleEnv(seed, shape, dtype) fns = [make_fn(i) for i in range(num_envs)] env1 = DummyVecEnv(fns) env2 = klass(fns) assert_venvs_equal(env1, env2, num_steps=num_steps) @pytest.mark.parametrize('dtype', ('uint8', 'float32')) @pytest.mark.parametrize('num_envs_in_series', (3, 4, 6)) def test_sync_sampling(dtype, num_envs_in_series): """ Test that a SubprocVecEnv running with envs in series outputs the same as DummyVecEnv. """ num_envs = 12 num_steps = 100 shape = (3, 8) def make_fn(seed): """ Get an environment constructor with a seed. """ return lambda: SimpleEnv(seed, shape, dtype) fns = [make_fn(i) for i in range(num_envs)] env1 = DummyVecEnv(fns) env2 = SubprocVecEnv(fns, in_series=num_envs_in_series) assert_venvs_equal(env1, env2, num_steps=num_steps) @pytest.mark.parametrize('dtype', ('uint8', 'float32')) @pytest.mark.parametrize('num_envs_in_series', (3, 4, 6)) def test_sync_sampling_sanity(dtype, num_envs_in_series): """ Test that a SubprocVecEnv running with envs in series outputs the same as SubprocVecEnv without running in series. """ num_envs = 12 num_steps = 100 shape = (3, 8) def make_fn(seed): """ Get an environment constructor with a seed. """ return lambda: SimpleEnv(seed, shape, dtype) fns = [make_fn(i) for i in range(num_envs)] env1 = SubprocVecEnv(fns) env2 = SubprocVecEnv(fns, in_series=num_envs_in_series) assert_venvs_equal(env1, env2, num_steps=num_steps) class SimpleEnv(gym.Env): """ An environment with a pre-determined observation space and RNG seed. 
""" def __init__(self, seed, shape, dtype): np.random.seed(seed) self._dtype = dtype self._start_obs = np.array(np.random.randint(0, 0x100, size=shape), dtype=dtype) self._max_steps = seed + 1 self._cur_obs = None self._cur_step = 0 # this is 0xFF instead of 0x100 because the Box space includes # the high end, while randint does not self.action_space = gym.spaces.Box(low=0, high=0xFF, shape=shape, dtype=dtype) self.observation_space = self.action_space def step(self, action): self._cur_obs += np.array(action, dtype=self._dtype) self._cur_step += 1 done = self._cur_step >= self._max_steps reward = self._cur_step / self._max_steps return self._cur_obs, reward, done, {'foo': 'bar' + str(reward)} def reset(self): self._cur_obs = self._start_obs self._cur_step = 0 return self._cur_obs def render(self, mode=None): raise NotImplementedError @with_mpi() def test_mpi_with_subprocvecenv(): shape = (2,3,4) nenv = 1 venv = SubprocVecEnv([lambda: SimpleEnv(0, shape, 'float32')] * nenv) ob = venv.reset() venv.close() assert ob.shape == (nenv,) + shape
5,162
31.471698
92
py
baselines
baselines-master/baselines/common/vec_env/vec_env.py
import contextlib import os from abc import ABC, abstractmethod from baselines.common.tile_images import tile_images class AlreadySteppingError(Exception): """ Raised when an asynchronous step is running while step_async() is called again. """ def __init__(self): msg = 'already running an async step' Exception.__init__(self, msg) class NotSteppingError(Exception): """ Raised when an asynchronous step is not running but step_wait() is called. """ def __init__(self): msg = 'not running an async step' Exception.__init__(self, msg) class VecEnv(ABC): """ An abstract asynchronous, vectorized environment. Used to batch data from multiple copies of an environment, so that each observation becomes an batch of observations, and expected action is a batch of actions to be applied per-environment. """ closed = False viewer = None metadata = { 'render.modes': ['human', 'rgb_array'] } def __init__(self, num_envs, observation_space, action_space): self.num_envs = num_envs self.observation_space = observation_space self.action_space = action_space @abstractmethod def reset(self): """ Reset all the environments and return an array of observations, or a dict of observation arrays. If step_async is still doing work, that work will be cancelled and step_wait() should not be called until step_async() is invoked again. """ pass @abstractmethod def step_async(self, actions): """ Tell all the environments to start taking a step with the given actions. Call step_wait() to get the results of the step. You should not call this if a step_async run is already pending. """ pass @abstractmethod def step_wait(self): """ Wait for the step taken with step_async(). Returns (obs, rews, dones, infos): - obs: an array of observations, or a dict of arrays of observations. - rews: an array of rewards - dones: an array of "episode done" booleans - infos: a sequence of info objects """ pass def close_extras(self): """ Clean up the extra resources, beyond what's in this base class. Only runs when not self.closed. """ pass def close(self): if self.closed: return if self.viewer is not None: self.viewer.close() self.close_extras() self.closed = True def step(self, actions): """ Step the environments synchronously. This is available for backwards compatibility. """ self.step_async(actions) return self.step_wait() def render(self, mode='human'): imgs = self.get_images() bigimg = tile_images(imgs) if mode == 'human': self.get_viewer().imshow(bigimg) return self.get_viewer().isopen elif mode == 'rgb_array': return bigimg else: raise NotImplementedError def get_images(self): """ Return RGB images from each environment """ raise NotImplementedError @property def unwrapped(self): if isinstance(self, VecEnvWrapper): return self.venv.unwrapped else: return self def get_viewer(self): if self.viewer is None: from gym.envs.classic_control import rendering self.viewer = rendering.SimpleImageViewer() return self.viewer class VecEnvWrapper(VecEnv): """ An environment wrapper that applies to an entire batch of environments at once. 
""" def __init__(self, venv, observation_space=None, action_space=None): self.venv = venv super().__init__(num_envs=venv.num_envs, observation_space=observation_space or venv.observation_space, action_space=action_space or venv.action_space) def step_async(self, actions): self.venv.step_async(actions) @abstractmethod def reset(self): pass @abstractmethod def step_wait(self): pass def close(self): return self.venv.close() def render(self, mode='human'): return self.venv.render(mode=mode) def get_images(self): return self.venv.get_images() def __getattr__(self, name): if name.startswith('_'): raise AttributeError("attempted to get missing private attribute '{}'".format(name)) return getattr(self.venv, name) class VecEnvObservationWrapper(VecEnvWrapper): @abstractmethod def process(self, obs): pass def reset(self): obs = self.venv.reset() return self.process(obs) def step_wait(self): obs, rews, dones, infos = self.venv.step_wait() return self.process(obs), rews, dones, infos class CloudpickleWrapper(object): """ Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle) """ def __init__(self, x): self.x = x def __getstate__(self): import cloudpickle return cloudpickle.dumps(self.x) def __setstate__(self, ob): import pickle self.x = pickle.loads(ob) @contextlib.contextmanager def clear_mpi_env_vars(): """ from mpi4py import MPI will call MPI_Init by default. If the child process has MPI environment variables, MPI will think that the child process is an MPI process just like the parent and do bad things such as hang. This context manager is a hacky way to clear those environment variables temporarily such as when we are starting multiprocessing Processes. """ removed_environment = {} for k, v in list(os.environ.items()): for prefix in ['OMPI_', 'PMI_']: if k.startswith(prefix): removed_environment[k] = v del os.environ[k] try: yield finally: os.environ.update(removed_environment)
6,195
26.660714
219
py
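To illustrate the VecEnvWrapper contract defined above, here is a hypothetical reward-scaling wrapper (a sketch, not part of baselines): a subclass only needs to implement reset() and step_wait(); step_async(), close() and attribute access are forwarded to the wrapped venv.
from baselines.common.vec_env import VecEnvWrapper

class VecRewardScale(VecEnvWrapper):
    """Hypothetical wrapper that multiplies every reward by a constant factor."""
    def __init__(self, venv, scale=0.1):
        VecEnvWrapper.__init__(self, venv)
        self.scale = scale

    def reset(self):
        # Observations pass through unchanged.
        return self.venv.reset()

    def step_wait(self):
        obs, rews, dones, infos = self.venv.step_wait()
        return obs, rews * self.scale, dones, infos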
baselines
baselines-master/baselines/common/vec_env/vec_monitor.py
from . import VecEnvWrapper from baselines.bench.monitor import ResultsWriter import numpy as np import time from collections import deque class VecMonitor(VecEnvWrapper): def __init__(self, venv, filename=None, keep_buf=0, info_keywords=()): VecEnvWrapper.__init__(self, venv) self.eprets = None self.eplens = None self.epcount = 0 self.tstart = time.time() if filename: self.results_writer = ResultsWriter(filename, header={'t_start': self.tstart}, extra_keys=info_keywords) else: self.results_writer = None self.info_keywords = info_keywords self.keep_buf = keep_buf if self.keep_buf: self.epret_buf = deque([], maxlen=keep_buf) self.eplen_buf = deque([], maxlen=keep_buf) def reset(self): obs = self.venv.reset() self.eprets = np.zeros(self.num_envs, 'f') self.eplens = np.zeros(self.num_envs, 'i') return obs def step_wait(self): obs, rews, dones, infos = self.venv.step_wait() self.eprets += rews self.eplens += 1 newinfos = list(infos[:]) for i in range(len(dones)): if dones[i]: info = infos[i].copy() ret = self.eprets[i] eplen = self.eplens[i] epinfo = {'r': ret, 'l': eplen, 't': round(time.time() - self.tstart, 6)} for k in self.info_keywords: epinfo[k] = info[k] info['episode'] = epinfo if self.keep_buf: self.epret_buf.append(ret) self.eplen_buf.append(eplen) self.epcount += 1 self.eprets[i] = 0 self.eplens[i] = 0 if self.results_writer: self.results_writer.write_row(epinfo) newinfos[i] = info return obs, rews, dones, newinfos
1,971
34.214286
90
py
baselines
baselines-master/baselines/common/vec_env/dummy_vec_env.py
import numpy as np from .vec_env import VecEnv from .util import copy_obs_dict, dict_to_obs, obs_space_info class DummyVecEnv(VecEnv): """ VecEnv that runs multiple environments sequentially, that is, the step and reset commands are sent to one environment at a time. Useful when debugging and when num_env == 1 (in the latter case, avoids communication overhead) """ def __init__(self, env_fns): """ Arguments: env_fns: iterable of callable functions that build environments """ self.envs = [fn() for fn in env_fns] env = self.envs[0] VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space) obs_space = env.observation_space self.keys, shapes, dtypes = obs_space_info(obs_space) self.buf_obs = { k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k]) for k in self.keys } self.buf_dones = np.zeros((self.num_envs,), dtype=np.bool) self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32) self.buf_infos = [{} for _ in range(self.num_envs)] self.actions = None self.spec = self.envs[0].spec def step_async(self, actions): listify = True try: if len(actions) == self.num_envs: listify = False except TypeError: pass if not listify: self.actions = actions else: assert self.num_envs == 1, "actions {} is either not a list or has a wrong size - cannot match to {} environments".format(actions, self.num_envs) self.actions = [actions] def step_wait(self): for e in range(self.num_envs): action = self.actions[e] # if isinstance(self.envs[e].action_space, spaces.Discrete): # action = int(action) obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e] = self.envs[e].step(action) if self.buf_dones[e]: obs = self.envs[e].reset() self._save_obs(e, obs) return (self._obs_from_buf(), np.copy(self.buf_rews), np.copy(self.buf_dones), self.buf_infos.copy()) def reset(self): for e in range(self.num_envs): obs = self.envs[e].reset() self._save_obs(e, obs) return self._obs_from_buf() def _save_obs(self, e, obs): for k in self.keys: if k is None: self.buf_obs[k][e] = obs else: self.buf_obs[k][e] = obs[k] def _obs_from_buf(self): return dict_to_obs(copy_obs_dict(self.buf_obs)) def get_images(self): return [env.render(mode='rgb_array') for env in self.envs] def render(self, mode='human'): if self.num_envs == 1: return self.envs[0].render(mode=mode) else: return super().render(mode=mode)
2,923
34.658537
157
py
baselines
baselines-master/baselines/common/vec_env/util.py
""" Helpers for dealing with vectorized environments. """ from collections import OrderedDict import gym import numpy as np def copy_obs_dict(obs): """ Deep-copy an observation dict. """ return {k: np.copy(v) for k, v in obs.items()} def dict_to_obs(obs_dict): """ Convert an observation dict into a raw array if the original observation space was not a Dict space. """ if set(obs_dict.keys()) == {None}: return obs_dict[None] return obs_dict def obs_space_info(obs_space): """ Get dict-structured information about a gym.Space. Returns: A tuple (keys, shapes, dtypes): keys: a list of dict keys. shapes: a dict mapping keys to shapes. dtypes: a dict mapping keys to dtypes. """ if isinstance(obs_space, gym.spaces.Dict): assert isinstance(obs_space.spaces, OrderedDict) subspaces = obs_space.spaces elif isinstance(obs_space, gym.spaces.Tuple): assert isinstance(obs_space.spaces, tuple) subspaces = {i: obs_space.spaces[i] for i in range(len(obs_space.spaces))} else: subspaces = {None: obs_space} keys = [] shapes = {} dtypes = {} for key, box in subspaces.items(): keys.append(key) shapes[key] = box.shape dtypes[key] = box.dtype return keys, shapes, dtypes def obs_to_dict(obs): """ Convert an observation into a dict. """ if isinstance(obs, dict): return obs return {None: obs}
1,513
23.031746
82
py
baselines
baselines-master/baselines/common/vec_env/__init__.py
from .vec_env import AlreadySteppingError, NotSteppingError, VecEnv, VecEnvWrapper, VecEnvObservationWrapper, CloudpickleWrapper from .dummy_vec_env import DummyVecEnv from .shmem_vec_env import ShmemVecEnv from .subproc_vec_env import SubprocVecEnv from .vec_frame_stack import VecFrameStack from .vec_monitor import VecMonitor from .vec_normalize import VecNormalize from .vec_remove_dict_obs import VecExtractDictObs __all__ = ['AlreadySteppingError', 'NotSteppingError', 'VecEnv', 'VecEnvWrapper', 'VecEnvObservationWrapper', 'CloudpickleWrapper', 'DummyVecEnv', 'ShmemVecEnv', 'SubprocVecEnv', 'VecFrameStack', 'VecMonitor', 'VecNormalize', 'VecExtractDictObs']
668
59.818182
246
py
baselines
baselines-master/baselines/common/vec_env/subproc_vec_env.py
import multiprocessing as mp import numpy as np from .vec_env import VecEnv, CloudpickleWrapper, clear_mpi_env_vars def worker(remote, parent_remote, env_fn_wrappers): def step_env(env, action): ob, reward, done, info = env.step(action) if done: ob = env.reset() return ob, reward, done, info parent_remote.close() envs = [env_fn_wrapper() for env_fn_wrapper in env_fn_wrappers.x] try: while True: cmd, data = remote.recv() if cmd == 'step': remote.send([step_env(env, action) for env, action in zip(envs, data)]) elif cmd == 'reset': remote.send([env.reset() for env in envs]) elif cmd == 'render': remote.send([env.render(mode='rgb_array') for env in envs]) elif cmd == 'close': remote.close() break elif cmd == 'get_spaces_spec': remote.send(CloudpickleWrapper((envs[0].observation_space, envs[0].action_space, envs[0].spec))) else: raise NotImplementedError except KeyboardInterrupt: print('SubprocVecEnv worker: got KeyboardInterrupt') finally: for env in envs: env.close() class SubprocVecEnv(VecEnv): """ VecEnv that runs multiple environments in parallel in subproceses and communicates with them via pipes. Recommended to use when num_envs > 1 and step() can be a bottleneck. """ def __init__(self, env_fns, spaces=None, context='spawn', in_series=1): """ Arguments: env_fns: iterable of callables - functions that create environments to run in subprocesses. Need to be cloud-pickleable in_series: number of environments to run in series in a single process (e.g. when len(env_fns) == 12 and in_series == 3, it will run 4 processes, each running 3 envs in series) """ self.waiting = False self.closed = False self.in_series = in_series nenvs = len(env_fns) assert nenvs % in_series == 0, "Number of envs must be divisible by number of envs to run in series" self.nremotes = nenvs // in_series env_fns = np.array_split(env_fns, self.nremotes) ctx = mp.get_context(context) self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(self.nremotes)]) self.ps = [ctx.Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)] for p in self.ps: p.daemon = True # if the main process crashes, we should not cause things to hang with clear_mpi_env_vars(): p.start() for remote in self.work_remotes: remote.close() self.remotes[0].send(('get_spaces_spec', None)) observation_space, action_space, self.spec = self.remotes[0].recv().x self.viewer = None VecEnv.__init__(self, nenvs, observation_space, action_space) def step_async(self, actions): self._assert_not_closed() actions = np.array_split(actions, self.nremotes) for remote, action in zip(self.remotes, actions): remote.send(('step', action)) self.waiting = True def step_wait(self): self._assert_not_closed() results = [remote.recv() for remote in self.remotes] results = _flatten_list(results) self.waiting = False obs, rews, dones, infos = zip(*results) return _flatten_obs(obs), np.stack(rews), np.stack(dones), infos def reset(self): self._assert_not_closed() for remote in self.remotes: remote.send(('reset', None)) obs = [remote.recv() for remote in self.remotes] obs = _flatten_list(obs) return _flatten_obs(obs) def close_extras(self): self.closed = True if self.waiting: for remote in self.remotes: remote.recv() for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() def get_images(self): self._assert_not_closed() for pipe in self.remotes: pipe.send(('render', None)) imgs = [pipe.recv() for pipe in self.remotes] imgs = _flatten_list(imgs) 
return imgs def _assert_not_closed(self): assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()" def __del__(self): if not self.closed: self.close() def _flatten_obs(obs): assert isinstance(obs, (list, tuple)) assert len(obs) > 0 if isinstance(obs[0], dict): keys = obs[0].keys() return {k: np.stack([o[k] for o in obs]) for k in keys} else: return np.stack(obs) def _flatten_list(l): assert isinstance(l, (list, tuple)) assert len(l) > 0 assert all([len(l_) > 0 for l_ in l]) return [l__ for l_ in l for l__ in l_]
5,069
35.47482
128
py
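A usage sketch for the in_series option documented above (illustrative values, not part of the original file): with twelve environment constructors and in_series=3, SubprocVecEnv starts four worker processes, each stepping three environments sequentially.
import gym
from baselines.common.vec_env import SubprocVecEnv

def make_env(seed):
    # Hypothetical helper returning a thunk that builds and seeds one env.
    def _thunk():
        env = gym.make('CartPole-v0')
        env.seed(seed)
        return env
    return _thunk

if __name__ == '__main__':  # needed because the default start method is 'spawn'
    venv = SubprocVecEnv([make_env(i) for i in range(12)], in_series=3)  # 12 envs / 3 in series = 4 worker processes
    obs = venv.reset()  # obs.shape == (12,) + venv.observation_space.shape
    venv.close()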
baselines
baselines-master/baselines/common/vec_env/test_video_recorder.py
""" Tests for asynchronous vectorized environments. """ import gym import pytest import os import glob import tempfile from .dummy_vec_env import DummyVecEnv from .shmem_vec_env import ShmemVecEnv from .subproc_vec_env import SubprocVecEnv from .vec_video_recorder import VecVideoRecorder @pytest.mark.parametrize('klass', (DummyVecEnv, ShmemVecEnv, SubprocVecEnv)) @pytest.mark.parametrize('num_envs', (1, 4)) @pytest.mark.parametrize('video_length', (10, 100)) @pytest.mark.parametrize('video_interval', (1, 50)) def test_video_recorder(klass, num_envs, video_length, video_interval): """ Wrap an existing VecEnv with VevVideoRecorder, Make (video_interval + video_length + 1) steps, then check that the file is present """ def make_fn(): env = gym.make('PongNoFrameskip-v4') return env fns = [make_fn for _ in range(num_envs)] env = klass(fns) with tempfile.TemporaryDirectory() as video_path: env = VecVideoRecorder(env, video_path, record_video_trigger=lambda x: x % video_interval == 0, video_length=video_length) env.reset() for _ in range(video_interval + video_length + 1): env.step([0] * num_envs) env.close() recorded_video = glob.glob(os.path.join(video_path, "*.mp4")) # first and second step assert len(recorded_video) == 2 # Files are not empty assert all(os.stat(p).st_size != 0 for p in recorded_video)
1,467
28.36
130
py
baselines
baselines-master/baselines/common/vec_env/shmem_vec_env.py
""" An interface for asynchronous vectorized environments. """ import multiprocessing as mp import numpy as np from .vec_env import VecEnv, CloudpickleWrapper, clear_mpi_env_vars import ctypes from baselines import logger from .util import dict_to_obs, obs_space_info, obs_to_dict _NP_TO_CT = {np.float32: ctypes.c_float, np.int32: ctypes.c_int32, np.int8: ctypes.c_int8, np.uint8: ctypes.c_char, np.bool: ctypes.c_bool} class ShmemVecEnv(VecEnv): """ Optimized version of SubprocVecEnv that uses shared variables to communicate observations. """ def __init__(self, env_fns, spaces=None, context='spawn'): """ If you don't specify observation_space, we'll have to create a dummy environment to get it. """ ctx = mp.get_context(context) if spaces: observation_space, action_space = spaces else: logger.log('Creating dummy env object to get spaces') with logger.scoped_configure(format_strs=[]): dummy = env_fns[0]() observation_space, action_space = dummy.observation_space, dummy.action_space dummy.close() del dummy VecEnv.__init__(self, len(env_fns), observation_space, action_space) self.obs_keys, self.obs_shapes, self.obs_dtypes = obs_space_info(observation_space) self.obs_bufs = [ {k: ctx.Array(_NP_TO_CT[self.obs_dtypes[k].type], int(np.prod(self.obs_shapes[k]))) for k in self.obs_keys} for _ in env_fns] self.parent_pipes = [] self.procs = [] with clear_mpi_env_vars(): for env_fn, obs_buf in zip(env_fns, self.obs_bufs): wrapped_fn = CloudpickleWrapper(env_fn) parent_pipe, child_pipe = ctx.Pipe() proc = ctx.Process(target=_subproc_worker, args=(child_pipe, parent_pipe, wrapped_fn, obs_buf, self.obs_shapes, self.obs_dtypes, self.obs_keys)) proc.daemon = True self.procs.append(proc) self.parent_pipes.append(parent_pipe) proc.start() child_pipe.close() self.waiting_step = False self.viewer = None def reset(self): if self.waiting_step: logger.warn('Called reset() while waiting for the step to complete') self.step_wait() for pipe in self.parent_pipes: pipe.send(('reset', None)) return self._decode_obses([pipe.recv() for pipe in self.parent_pipes]) def step_async(self, actions): assert len(actions) == len(self.parent_pipes) for pipe, act in zip(self.parent_pipes, actions): pipe.send(('step', act)) self.waiting_step = True def step_wait(self): outs = [pipe.recv() for pipe in self.parent_pipes] self.waiting_step = False obs, rews, dones, infos = zip(*outs) return self._decode_obses(obs), np.array(rews), np.array(dones), infos def close_extras(self): if self.waiting_step: self.step_wait() for pipe in self.parent_pipes: pipe.send(('close', None)) for pipe in self.parent_pipes: pipe.recv() pipe.close() for proc in self.procs: proc.join() def get_images(self, mode='human'): for pipe in self.parent_pipes: pipe.send(('render', None)) return [pipe.recv() for pipe in self.parent_pipes] def _decode_obses(self, obs): result = {} for k in self.obs_keys: bufs = [b[k] for b in self.obs_bufs] o = [np.frombuffer(b.get_obj(), dtype=self.obs_dtypes[k]).reshape(self.obs_shapes[k]) for b in bufs] result[k] = np.array(o) return dict_to_obs(result) def _subproc_worker(pipe, parent_pipe, env_fn_wrapper, obs_bufs, obs_shapes, obs_dtypes, keys): """ Control a single environment instance using IPC and shared memory. 
""" def _write_obs(maybe_dict_obs): flatdict = obs_to_dict(maybe_dict_obs) for k in keys: dst = obs_bufs[k].get_obj() dst_np = np.frombuffer(dst, dtype=obs_dtypes[k]).reshape(obs_shapes[k]) # pylint: disable=W0212 np.copyto(dst_np, flatdict[k]) env = env_fn_wrapper.x() parent_pipe.close() try: while True: cmd, data = pipe.recv() if cmd == 'reset': pipe.send(_write_obs(env.reset())) elif cmd == 'step': obs, reward, done, info = env.step(data) if done: obs = env.reset() pipe.send((_write_obs(obs), reward, done, info)) elif cmd == 'render': pipe.send(env.render(mode='rgb_array')) elif cmd == 'close': pipe.send(None) break else: raise RuntimeError('Got unrecognized cmd %s' % cmd) except KeyboardInterrupt: print('ShmemVecEnv worker: got KeyboardInterrupt') finally: env.close()
5,178
35.471831
129
py
baselines
baselines-master/baselines/common/vec_env/vec_frame_stack.py
from .vec_env import VecEnvWrapper import numpy as np from gym import spaces class VecFrameStack(VecEnvWrapper): def __init__(self, venv, nstack): self.venv = venv self.nstack = nstack wos = venv.observation_space # wrapped ob space low = np.repeat(wos.low, self.nstack, axis=-1) high = np.repeat(wos.high, self.nstack, axis=-1) self.stackedobs = np.zeros((venv.num_envs,) + low.shape, low.dtype) observation_space = spaces.Box(low=low, high=high, dtype=venv.observation_space.dtype) VecEnvWrapper.__init__(self, venv, observation_space=observation_space) def step_wait(self): obs, rews, news, infos = self.venv.step_wait() self.stackedobs = np.roll(self.stackedobs, shift=-1, axis=-1) for (i, new) in enumerate(news): if new: self.stackedobs[i] = 0 self.stackedobs[..., -obs.shape[-1]:] = obs return self.stackedobs, rews, news, infos def reset(self): obs = self.venv.reset() self.stackedobs[...] = 0 self.stackedobs[..., -obs.shape[-1]:] = obs return self.stackedobs
1,150
36.129032
94
py
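A small sketch of the shape transformation performed by VecFrameStack above, using a hypothetical environment (FakeFrameEnv, not part of baselines) that emits 84x84x1 uint8 frames: with nstack=4 the wrapped observation becomes 84x84x4, with the newest frame in the last channel slot.
import numpy as np
import gym
from baselines.common.vec_env import DummyVecEnv, VecFrameStack

class FakeFrameEnv(gym.Env):
    """Hypothetical env emitting 84x84x1 frames, used only to show the stacked shapes."""
    observation_space = gym.spaces.Box(low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)
    action_space = gym.spaces.Discrete(2)
    def reset(self):
        return self.observation_space.sample()
    def step(self, action):
        return self.observation_space.sample(), 0.0, False, {}

venv = VecFrameStack(DummyVecEnv([FakeFrameEnv]), nstack=4)
obs = venv.reset()
assert venv.observation_space.shape == (84, 84, 4)
assert obs.shape == (1, 84, 84, 4)  # (num_envs,) + stacked observation shape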
baselines
baselines-master/baselines/common/vec_env/vec_remove_dict_obs.py
from .vec_env import VecEnvObservationWrapper class VecExtractDictObs(VecEnvObservationWrapper): def __init__(self, venv, key): self.key = key super().__init__(venv=venv, observation_space=venv.observation_space.spaces[self.key]) def process(self, obs): return obs[self.key]
321
28.272727
70
py
baselines
baselines-master/baselines/ppo2/ppo2.py
import os import time import numpy as np import os.path as osp from baselines import logger from collections import deque from baselines.common import explained_variance, set_global_seeds from baselines.common.policies import build_policy try: from mpi4py import MPI except ImportError: MPI = None from baselines.ppo2.runner import Runner def constfn(val): def f(_): return val return f def learn(*, network, env, total_timesteps, eval_env = None, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4, vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95, log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2, save_interval=0, load_path=None, model_fn=None, update_fn=None, init_fn=None, mpi_rank_weight=1, comm=None, **network_kwargs): ''' Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347) Parameters: ---------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See common/models.py/lstm for more details on using recurrent nets in policies env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation. The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class. nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) total_timesteps: int number of timesteps (i.e. number of actions taken in the environment) ent_coef: float policy entropy coefficient in the optimization objective lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the training and 0 is the end of the training. vf_coef: float value function loss coefficient in the optimization objective max_grad_norm: float or None gradient norm clipping coefficient gamma: float discounting factor lam: float advantage estimation discounting factor (lambda in the paper) log_interval: int number of updates between logging events nminibatches: int number of training minibatches per update. For recurrent policies, should be smaller than or equal to the number of environments run in parallel. noptepochs: int number of training epochs per update cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training and 0 is the end of the training save_interval: int number of updates between saving events load_path: str path to load the model from **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network For instance, 'mlp' network architecture has arguments num_hidden and num_layers. 
''' set_global_seeds(seed) if isinstance(lr, float): lr = constfn(lr) else: assert callable(lr) if isinstance(cliprange, float): cliprange = constfn(cliprange) else: assert callable(cliprange) total_timesteps = int(total_timesteps) policy = build_policy(env, network, **network_kwargs) # Get the nb of env nenvs = env.num_envs # Get state_space and action_space ob_space = env.observation_space ac_space = env.action_space # Calculate the batch_size nbatch = nenvs * nsteps nbatch_train = nbatch // nminibatches is_mpi_root = (MPI is None or MPI.COMM_WORLD.Get_rank() == 0) # Instantiate the model object (that creates act_model and train_model) if model_fn is None: from baselines.ppo2.model import Model model_fn = Model model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, comm=comm, mpi_rank_weight=mpi_rank_weight) if load_path is not None: model.load(load_path) # Instantiate the runner object runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam) if eval_env is not None: eval_runner = Runner(env = eval_env, model = model, nsteps = nsteps, gamma = gamma, lam= lam) epinfobuf = deque(maxlen=100) if eval_env is not None: eval_epinfobuf = deque(maxlen=100) if init_fn is not None: init_fn() # Start total timer tfirststart = time.perf_counter() nupdates = total_timesteps//nbatch for update in range(1, nupdates+1): assert nbatch % nminibatches == 0 # Start timer tstart = time.perf_counter() frac = 1.0 - (update - 1.0) / nupdates # Calculate the learning rate lrnow = lr(frac) # Calculate the cliprange cliprangenow = cliprange(frac) if update % log_interval == 0 and is_mpi_root: logger.info('Stepping environment...') # Get minibatch obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632 if eval_env is not None: eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() #pylint: disable=E0632 if update % log_interval == 0 and is_mpi_root: logger.info('Done.') epinfobuf.extend(epinfos) if eval_env is not None: eval_epinfobuf.extend(eval_epinfos) # Here what we're going to do is for each minibatch calculate the loss and append it. 
mblossvals = [] if states is None: # nonrecurrent version # Index of each element of batch_size # Create the indices array inds = np.arange(nbatch) for _ in range(noptepochs): # Randomize the indexes np.random.shuffle(inds) # 0 to batch_size with batch_train_size step for start in range(0, nbatch, nbatch_train): end = start + nbatch_train mbinds = inds[start:end] slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs)) mblossvals.append(model.train(lrnow, cliprangenow, *slices)) else: # recurrent version assert nenvs % nminibatches == 0 envsperbatch = nenvs // nminibatches envinds = np.arange(nenvs) flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps) for _ in range(noptepochs): np.random.shuffle(envinds) for start in range(0, nenvs, envsperbatch): end = start + envsperbatch mbenvinds = envinds[start:end] mbflatinds = flatinds[mbenvinds].ravel() slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs)) mbstates = states[mbenvinds] mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates)) # Feedforward --> get losses --> update lossvals = np.mean(mblossvals, axis=0) # End timer tnow = time.perf_counter() # Calculate the fps (frame per second) fps = int(nbatch / (tnow - tstart)) if update_fn is not None: update_fn(update) if update % log_interval == 0 or update == 1: # Calculates whether the value function is a good predictor of the returns (ev close to 1) # or if it's just worse than predicting nothing (ev <= 0) ev = explained_variance(values, returns) logger.logkv("misc/serial_timesteps", update*nsteps) logger.logkv("misc/nupdates", update) logger.logkv("misc/total_timesteps", update*nbatch) logger.logkv("fps", fps) logger.logkv("misc/explained_variance", float(ev)) logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf])) logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf])) if eval_env is not None: logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]) ) logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]) ) logger.logkv('misc/time_elapsed', tnow - tfirststart) for (lossval, lossname) in zip(lossvals, model.loss_names): logger.logkv('loss/' + lossname, lossval) logger.dumpkvs() if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir() and is_mpi_root: checkdir = osp.join(logger.get_dir(), 'checkpoints') os.makedirs(checkdir, exist_ok=True) savepath = osp.join(checkdir, '%.5i'%update) print('Saving to', savepath) model.save(savepath) return model # Avoid a division error when calculating the mean (in our case, if the list is empty we return np.nan instead of raising an error) def safemean(xs): return np.nan if len(xs) == 0 else np.mean(xs)
10,229
44.466667
184
py
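A hypothetical minimal invocation of the learn() entry point documented above, on a vectorized CartPole (the hyperparameter values here are illustrative rather than tuned):
import gym
from baselines.common.vec_env import DummyVecEnv
from baselines.ppo2 import ppo2

venv = DummyVecEnv([lambda: gym.make('CartPole-v0') for _ in range(4)])
model = ppo2.learn(
    network='mlp',           # standard MLP policy from baselines.common.models
    env=venv,
    total_timesteps=20000,
    nsteps=128,              # batch size per update = nsteps * nenvs = 512
    nminibatches=4,
    lr=3e-4,
    cliprange=0.2,
)
venv.close()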
baselines
baselines-master/baselines/ppo2/microbatched_model.py
import tensorflow as tf import numpy as np from baselines.ppo2.model import Model class MicrobatchedModel(Model): """ Model that does training one microbatch at a time - when gradient computation on the entire minibatch causes some overflow """ def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train, nsteps, ent_coef, vf_coef, max_grad_norm, mpi_rank_weight, comm, microbatch_size): self.nmicrobatches = nbatch_train // microbatch_size self.microbatch_size = microbatch_size assert nbatch_train % microbatch_size == 0, 'microbatch_size ({}) should divide nbatch_train ({}) evenly'.format(microbatch_size, nbatch_train) super().__init__( policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nbatch_act, nbatch_train=microbatch_size, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, mpi_rank_weight=mpi_rank_weight, comm=comm) self.grads_ph = [tf.placeholder(dtype=g.dtype, shape=g.shape) for g in self.grads] grads_ph_and_vars = list(zip(self.grads_ph, self.var)) self._apply_gradients_op = self.trainer.apply_gradients(grads_ph_and_vars) def train(self, lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None): assert states is None, "microbatches with recurrent models are not supported yet" # Here we calculate advantage A(s,a) = R + yV(s') - V(s) # Returns = R + yV(s') advs = returns - values # Normalize the advantages advs = (advs - advs.mean()) / (advs.std() + 1e-8) # Initialize empty list for per-microbatch stats like pg_loss, vf_loss, entropy, approxkl (whatever is in self.stats_list) stats_vs = [] for microbatch_idx in range(self.nmicrobatches): _sli = range(microbatch_idx * self.microbatch_size, (microbatch_idx+1) * self.microbatch_size) td_map = { self.train_model.X: obs[_sli], self.A:actions[_sli], self.ADV:advs[_sli], self.R:returns[_sli], self.CLIPRANGE:cliprange, self.OLDNEGLOGPAC:neglogpacs[_sli], self.OLDVPRED:values[_sli] } # Compute gradient on a microbatch (note that variables do not change here) ... grad_v, stats_v = self.sess.run([self.grads, self.stats_list], td_map) if microbatch_idx == 0: sum_grad_v = grad_v else: # .. and add to the total of the gradients for i, g in enumerate(grad_v): sum_grad_v[i] += g stats_vs.append(stats_v) feed_dict = {ph: sum_g / self.nmicrobatches for ph, sum_g in zip(self.grads_ph, sum_grad_v)} feed_dict[self.LR] = lr # Update variables using average of the gradients self.sess.run(self._apply_gradients_op, feed_dict) # Return average of the stats return np.mean(np.array(stats_vs), axis=0).tolist()
3,241
40.037975
151
py
baselines
baselines-master/baselines/ppo2/test_microbatches.py
import gym import tensorflow as tf import numpy as np from functools import partial from baselines.common.vec_env.dummy_vec_env import DummyVecEnv from baselines.common.tf_util import make_session from baselines.ppo2.ppo2 import learn from baselines.ppo2.microbatched_model import MicrobatchedModel def test_microbatches(): def env_fn(): env = gym.make('CartPole-v0') env.seed(0) return env learn_fn = partial(learn, network='mlp', nsteps=32, total_timesteps=32, seed=0) env_ref = DummyVecEnv([env_fn]) sess_ref = make_session(make_default=True, graph=tf.Graph()) learn_fn(env=env_ref) vars_ref = {v.name: sess_ref.run(v) for v in tf.trainable_variables()} env_test = DummyVecEnv([env_fn]) sess_test = make_session(make_default=True, graph=tf.Graph()) learn_fn(env=env_test, model_fn=partial(MicrobatchedModel, microbatch_size=2)) # learn_fn(env=env_test) vars_test = {v.name: sess_test.run(v) for v in tf.trainable_variables()} for v in vars_ref: np.testing.assert_allclose(vars_ref[v], vars_test[v], atol=3e-3) if __name__ == '__main__': test_microbatches()
1,152
31.027778
83
py
baselines
baselines-master/baselines/ppo2/model.py
import tensorflow as tf import functools from baselines.common.tf_util import get_session, save_variables, load_variables from baselines.common.tf_util import initialize try: from baselines.common.mpi_adam_optimizer import MpiAdamOptimizer from mpi4py import MPI from baselines.common.mpi_util import sync_from_root except ImportError: MPI = None class Model(object): """ We use this object to : __init__: - Creates the step_model - Creates the train_model train(): - Make the training part (feedforward and retropropagation of gradients) save/load(): - Save load the model """ def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train, nsteps, ent_coef, vf_coef, max_grad_norm, mpi_rank_weight=1, comm=None, microbatch_size=None): self.sess = sess = get_session() if MPI is not None and comm is None: comm = MPI.COMM_WORLD with tf.variable_scope('ppo2_model', reuse=tf.AUTO_REUSE): # CREATE OUR TWO MODELS # act_model that is used for sampling act_model = policy(nbatch_act, 1, sess) # Train model for training if microbatch_size is None: train_model = policy(nbatch_train, nsteps, sess) else: train_model = policy(microbatch_size, nsteps, sess) # CREATE THE PLACEHOLDERS self.A = A = train_model.pdtype.sample_placeholder([None]) self.ADV = ADV = tf.placeholder(tf.float32, [None]) self.R = R = tf.placeholder(tf.float32, [None]) # Keep track of old actor self.OLDNEGLOGPAC = OLDNEGLOGPAC = tf.placeholder(tf.float32, [None]) # Keep track of old critic self.OLDVPRED = OLDVPRED = tf.placeholder(tf.float32, [None]) self.LR = LR = tf.placeholder(tf.float32, []) # Cliprange self.CLIPRANGE = CLIPRANGE = tf.placeholder(tf.float32, []) neglogpac = train_model.pd.neglogp(A) # Calculate the entropy # Entropy is used to improve exploration by limiting the premature convergence to suboptimal policy. entropy = tf.reduce_mean(train_model.pd.entropy()) # CALCULATE THE LOSS # Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss # Clip the value to reduce variability during Critic training # Get the predicted value vpred = train_model.vf vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE) # Unclipped value vf_losses1 = tf.square(vpred - R) # Clipped value vf_losses2 = tf.square(vpredclipped - R) vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2)) # Calculate ratio (pi current policy / pi old policy) ratio = tf.exp(OLDNEGLOGPAC - neglogpac) # Defining Loss = - J is equivalent to max J pg_losses = -ADV * ratio pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE) # Final PG loss pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2)) approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC)) clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE))) # Total loss loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef # UPDATE THE PARAMETERS USING LOSS # 1. Get the model parameters params = tf.trainable_variables('ppo2_model') # 2. Build our trainer if comm is not None and comm.Get_size() > 1: self.trainer = MpiAdamOptimizer(comm, learning_rate=LR, mpi_rank_weight=mpi_rank_weight, epsilon=1e-5) else: self.trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5) # 3. 
Calculate the gradients grads_and_var = self.trainer.compute_gradients(loss, params) grads, var = zip(*grads_and_var) if max_grad_norm is not None: # Clip the gradients (normalize) grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm) grads_and_var = list(zip(grads, var)) # zip aggregate each gradient with parameters associated # For instance zip(ABCD, xyza) => Ax, By, Cz, Da self.grads = grads self.var = var self._train_op = self.trainer.apply_gradients(grads_and_var) self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac'] self.stats_list = [pg_loss, vf_loss, entropy, approxkl, clipfrac] self.train_model = train_model self.act_model = act_model self.step = act_model.step self.value = act_model.value self.initial_state = act_model.initial_state self.save = functools.partial(save_variables, sess=sess) self.load = functools.partial(load_variables, sess=sess) initialize() global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="") if MPI is not None: sync_from_root(sess, global_variables, comm=comm) #pylint: disable=E1101 def train(self, lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None): # Here we calculate advantage A(s,a) = R + yV(s') - V(s) # Returns = R + yV(s') advs = returns - values # Normalize the advantages advs = (advs - advs.mean()) / (advs.std() + 1e-8) td_map = { self.train_model.X : obs, self.A : actions, self.ADV : advs, self.R : returns, self.LR : lr, self.CLIPRANGE : cliprange, self.OLDNEGLOGPAC : neglogpacs, self.OLDVPRED : values } if states is not None: td_map[self.train_model.S] = states td_map[self.train_model.M] = masks return self.sess.run( self.stats_list + [self._train_op], td_map )[:-1]
6,054
36.84375
114
py
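The clipped surrogate objective assembled in the TensorFlow graph above can be summarized compactly; this numpy sketch (an illustration, not part of the file) mirrors the pg_loss computation, where the probability ratio is recovered as exp(OLDNEGLOGPAC - neglogpac):
import numpy as np

def clipped_pg_loss(advs, neglogpac, old_neglogpac, cliprange):
    # ratio = pi_new(a|s) / pi_old(a|s), recovered from negative log-probabilities
    ratio = np.exp(old_neglogpac - neglogpac)
    pg_losses = -advs * ratio
    pg_losses2 = -advs * np.clip(ratio, 1.0 - cliprange, 1.0 + cliprange)
    return np.mean(np.maximum(pg_losses, pg_losses2))

# Hand-picked illustrative values
advs = np.array([1.0, -0.5])
old_neglogpac = np.array([0.7, 0.7])
neglogpac = np.array([0.4, 0.9])  # new policy's negative log-probs for the same actions
print(clipped_pg_loss(advs, neglogpac, old_neglogpac, cliprange=0.2))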
baselines
baselines-master/baselines/ppo2/defaults.py
def mujoco(): return dict( nsteps=2048, nminibatches=32, lam=0.95, gamma=0.99, noptepochs=10, log_interval=1, ent_coef=0.0, lr=lambda f: 3e-4 * f, cliprange=0.2, value_network='copy' ) def atari(): return dict( nsteps=128, nminibatches=4, lam=0.95, gamma=0.99, noptepochs=4, log_interval=1, ent_coef=.01, lr=lambda f : f * 2.5e-4, cliprange=0.1, ) def retro(): return atari()
518
18.961538
59
py
baselines
baselines-master/baselines/ppo2/runner.py
import numpy as np from baselines.common.runners import AbstractEnvRunner class Runner(AbstractEnvRunner): """ We use this object to make a mini batch of experiences __init__: - Initialize the runner run(): - Make a mini batch """ def __init__(self, *, env, model, nsteps, gamma, lam): super().__init__(env=env, model=model, nsteps=nsteps) # Lambda used in GAE (Generalized Advantage Estimation) self.lam = lam # Discount rate self.gamma = gamma def run(self): # Here, we init the lists that will contain the mb of experiences mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[] mb_states = self.states epinfos = [] # For each step in range(nsteps) for _ in range(self.nsteps): # Given observations, get actions, values and neglogpacs # We already have self.obs because Runner superclass run self.obs[:] = env.reset() on init actions, values, self.states, neglogpacs = self.model.step(self.obs, S=self.states, M=self.dones) mb_obs.append(self.obs.copy()) mb_actions.append(actions) mb_values.append(values) mb_neglogpacs.append(neglogpacs) mb_dones.append(self.dones) # Take actions in env and look at the results # infos contains a lot of useful information self.obs[:], rewards, self.dones, infos = self.env.step(actions) for info in infos: maybeepinfo = info.get('episode') if maybeepinfo: epinfos.append(maybeepinfo) mb_rewards.append(rewards) #batch of steps to batch of rollouts mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype) mb_rewards = np.asarray(mb_rewards, dtype=np.float32) mb_actions = np.asarray(mb_actions) mb_values = np.asarray(mb_values, dtype=np.float32) mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32) mb_dones = np.asarray(mb_dones, dtype=np.bool) last_values = self.model.value(self.obs, S=self.states, M=self.dones) # discount/bootstrap off value fn mb_returns = np.zeros_like(mb_rewards) mb_advs = np.zeros_like(mb_rewards) lastgaelam = 0 for t in reversed(range(self.nsteps)): if t == self.nsteps - 1: nextnonterminal = 1.0 - self.dones nextvalues = last_values else: nextnonterminal = 1.0 - mb_dones[t+1] nextvalues = mb_values[t+1] delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t] mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam mb_returns = mb_advs + mb_values return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)), mb_states, epinfos) # obs, returns, masks, actions, values, neglogpacs, states = runner.run() def sf01(arr): """ swap and then flatten axes 0 and 1 """ s = arr.shape return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
3,194
40.493506
109
py
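The backward GAE recursion in Runner.run above, written out for a single environment with hand-picked numbers (an illustration; it assumes no episode boundary is crossed during the rollout):
import numpy as np

rewards = np.array([1.0, 1.0, 1.0], dtype=np.float32)
values = np.array([0.5, 0.5, 0.5], dtype=np.float32)  # V(s_t) from the critic
last_value = 0.5                                      # V(s_T) used to bootstrap the final step
gamma, lam = 0.99, 0.95

advs = np.zeros_like(rewards)
lastgaelam = 0.0
for t in reversed(range(len(rewards))):
    nextvalue = last_value if t == len(rewards) - 1 else values[t + 1]
    delta = rewards[t] + gamma * nextvalue - values[t]          # TD residual
    advs[t] = lastgaelam = delta + gamma * lam * lastgaelam     # GAE(gamma, lambda)
returns = advs + values                                         # targets for the value function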
baselines
baselines-master/baselines/ppo2/__init__.py
0
0
0
py
baselines
baselines-master/baselines/a2c/a2c.py
import time import functools import tensorflow as tf from baselines import logger from baselines.common import set_global_seeds, explained_variance from baselines.common import tf_util from baselines.common.policies import build_policy from baselines.a2c.utils import Scheduler, find_trainable_variables from baselines.a2c.runner import Runner from baselines.ppo2.ppo2 import safemean from collections import deque from tensorflow import losses class Model(object): """ We use this class to : __init__: - Creates the step_model - Creates the train_model train(): - Make the training part (feedforward and retropropagation of gradients) save/load(): - Save load the model """ def __init__(self, policy, env, nsteps, ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4, alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear'): sess = tf_util.get_session() nenvs = env.num_envs nbatch = nenvs*nsteps with tf.variable_scope('a2c_model', reuse=tf.AUTO_REUSE): # step_model is used for sampling step_model = policy(nenvs, 1, sess) # train_model is used to train our network train_model = policy(nbatch, nsteps, sess) A = tf.placeholder(train_model.action.dtype, train_model.action.shape) ADV = tf.placeholder(tf.float32, [nbatch]) R = tf.placeholder(tf.float32, [nbatch]) LR = tf.placeholder(tf.float32, []) # Calculate the loss # Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss # Policy loss neglogpac = train_model.pd.neglogp(A) # L = A(s,a) * -logpi(a|s) pg_loss = tf.reduce_mean(ADV * neglogpac) # Entropy is used to improve exploration by limiting the premature convergence to suboptimal policy. entropy = tf.reduce_mean(train_model.pd.entropy()) # Value loss vf_loss = losses.mean_squared_error(tf.squeeze(train_model.vf), R) loss = pg_loss - entropy*ent_coef + vf_loss * vf_coef # Update parameters using loss # 1. Get the model parameters params = find_trainable_variables("a2c_model") # 2. Calculate the gradients grads = tf.gradients(loss, params) if max_grad_norm is not None: # Clip the gradients (normalize) grads, grad_norm = tf.clip_by_global_norm(grads, max_grad_norm) grads = list(zip(grads, params)) # zip aggregate each gradient with parameters associated # For instance zip(ABCD, xyza) => Ax, By, Cz, Da # 3. 
Make op for one policy and value update step of A2C trainer = tf.train.RMSPropOptimizer(learning_rate=LR, decay=alpha, epsilon=epsilon) _train = trainer.apply_gradients(grads) lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule) def train(obs, states, rewards, masks, actions, values): # Here we calculate advantage A(s,a) = R + yV(s') - V(s) # rewards = R + yV(s') advs = rewards - values for step in range(len(obs)): cur_lr = lr.value() td_map = {train_model.X:obs, A:actions, ADV:advs, R:rewards, LR:cur_lr} if states is not None: td_map[train_model.S] = states td_map[train_model.M] = masks policy_loss, value_loss, policy_entropy, _ = sess.run( [pg_loss, vf_loss, entropy, _train], td_map ) return policy_loss, value_loss, policy_entropy self.train = train self.train_model = train_model self.step_model = step_model self.step = step_model.step self.value = step_model.value self.initial_state = step_model.initial_state self.save = functools.partial(tf_util.save_variables, sess=sess) self.load = functools.partial(tf_util.load_variables, sess=sess) tf.global_variables_initializer().run(session=sess) def learn( network, env, seed=None, nsteps=5, total_timesteps=int(80e6), vf_coef=0.5, ent_coef=0.01, max_grad_norm=0.5, lr=7e-4, lrschedule='linear', epsilon=1e-5, alpha=0.99, gamma=0.99, log_interval=100, load_path=None, **network_kwargs): ''' Main entrypoint for A2C algorithm. Train a policy with given network architecture on a given environment using a2c algorithm. Parameters: ----------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See baselines.common/policies.py/lstm for more details on using recurrent nets in policies env: RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py) seed: seed to make random number sequence in the algorithm reproducible. By default is None which means seed from system noise generator (not reproducible) nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) total_timesteps: int, total number of timesteps to train on (default: 80M) vf_coef: float, coefficient in front of value function loss in the total loss function (default: 0.5) ent_coef: float, coefficient in front of the policy entropy in the total loss function (default: 0.01) max_grad_norm: float, gradient is clipped to have global L2 norm no more than this value (default: 0.5) lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4) lrschedule: schedule of learning rate. 
Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and returns fraction of the learning rate (specified as lr) as output epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5) alpha: float, RMSProp decay parameter (default: 0.99) gamma: float, reward discounting parameter (default: 0.99) log_interval: int, specifies how frequently the logs are printed out (default: 100) **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network For instance, 'mlp' network architecture has arguments num_hidden and num_layers. ''' set_global_seeds(seed) # Get the nb of env nenvs = env.num_envs policy = build_policy(env, network, **network_kwargs) # Instantiate the model object (that creates step_model and train_model) model = Model(policy=policy, env=env, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule) if load_path is not None: model.load(load_path) # Instantiate the runner object runner = Runner(env, model, nsteps=nsteps, gamma=gamma) epinfobuf = deque(maxlen=100) # Calculate the batch_size nbatch = nenvs*nsteps # Start total timer tstart = time.time() for update in range(1, total_timesteps//nbatch+1): # Get mini batch of experiences obs, states, rewards, masks, actions, values, epinfos = runner.run() epinfobuf.extend(epinfos) policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values) nseconds = time.time()-tstart # Calculate the fps (frame per second) fps = int((update*nbatch)/nseconds) if update % log_interval == 0 or update == 1: # Calculates if value function is a good predicator of the returns (ev > 1) # or if it's just worse than predicting nothing (ev =< 0) ev = explained_variance(values, rewards) logger.record_tabular("nupdates", update) logger.record_tabular("total_timesteps", update*nbatch) logger.record_tabular("fps", fps) logger.record_tabular("policy_entropy", float(policy_entropy)) logger.record_tabular("value_loss", float(value_loss)) logger.record_tabular("explained_variance", float(ev)) logger.record_tabular("eprewmean", safemean([epinfo['r'] for epinfo in epinfobuf])) logger.record_tabular("eplenmean", safemean([epinfo['l'] for epinfo in epinfobuf])) logger.dump_tabular() return model
9,451
39.566524
186
py
baselines
baselines-master/baselines/a2c/utils.py
import os import numpy as np import tensorflow as tf from collections import deque def sample(logits): noise = tf.random_uniform(tf.shape(logits)) return tf.argmax(logits - tf.log(-tf.log(noise)), 1) def cat_entropy(logits): a0 = logits - tf.reduce_max(logits, 1, keepdims=True) ea0 = tf.exp(a0) z0 = tf.reduce_sum(ea0, 1, keepdims=True) p0 = ea0 / z0 return tf.reduce_sum(p0 * (tf.log(z0) - a0), 1) def cat_entropy_softmax(p0): return - tf.reduce_sum(p0 * tf.log(p0 + 1e-6), axis = 1) def ortho_init(scale=1.0): def _ortho_init(shape, dtype, partition_info=None): #lasagne ortho init for tf shape = tuple(shape) if len(shape) == 2: flat_shape = shape elif len(shape) == 4: # assumes NHWC flat_shape = (np.prod(shape[:-1]), shape[-1]) else: raise NotImplementedError a = np.random.normal(0.0, 1.0, flat_shape) u, _, v = np.linalg.svd(a, full_matrices=False) q = u if u.shape == flat_shape else v # pick the one with the correct shape q = q.reshape(shape) return (scale * q[:shape[0], :shape[1]]).astype(np.float32) return _ortho_init def conv(x, scope, *, nf, rf, stride, pad='VALID', init_scale=1.0, data_format='NHWC', one_dim_bias=False): if data_format == 'NHWC': channel_ax = 3 strides = [1, stride, stride, 1] bshape = [1, 1, 1, nf] elif data_format == 'NCHW': channel_ax = 1 strides = [1, 1, stride, stride] bshape = [1, nf, 1, 1] else: raise NotImplementedError bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1] nin = x.get_shape()[channel_ax].value wshape = [rf, rf, nin, nf] with tf.variable_scope(scope): w = tf.get_variable("w", wshape, initializer=ortho_init(init_scale)) b = tf.get_variable("b", bias_var_shape, initializer=tf.constant_initializer(0.0)) if not one_dim_bias and data_format == 'NHWC': b = tf.reshape(b, bshape) return tf.nn.conv2d(x, w, strides=strides, padding=pad, data_format=data_format) + b def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0): with tf.variable_scope(scope): nin = x.get_shape()[1].value w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale)) b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias)) return tf.matmul(x, w)+b def batch_to_seq(h, nbatch, nsteps, flat=False): if flat: h = tf.reshape(h, [nbatch, nsteps]) else: h = tf.reshape(h, [nbatch, nsteps, -1]) return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps, value=h)] def seq_to_batch(h, flat = False): shape = h[0].get_shape().as_list() if not flat: assert(len(shape) > 1) nh = h[0].get_shape()[-1].value return tf.reshape(tf.concat(axis=1, values=h), [-1, nh]) else: return tf.reshape(tf.stack(values=h, axis=1), [-1]) def lstm(xs, ms, s, scope, nh, init_scale=1.0): nbatch, nin = [v.value for v in xs[0].get_shape()] with tf.variable_scope(scope): wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale)) wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale)) b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0)) c, h = tf.split(axis=1, num_or_size_splits=2, value=s) for idx, (x, m) in enumerate(zip(xs, ms)): c = c*(1-m) h = h*(1-m) z = tf.matmul(x, wx) + tf.matmul(h, wh) + b i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z) i = tf.nn.sigmoid(i) f = tf.nn.sigmoid(f) o = tf.nn.sigmoid(o) u = tf.tanh(u) c = f*c + i*u h = o*tf.tanh(c) xs[idx] = h s = tf.concat(axis=1, values=[c, h]) return xs, s def _ln(x, g, b, e=1e-5, axes=[1]): u, s = tf.nn.moments(x, axes=axes, keep_dims=True) x = (x-u)/tf.sqrt(s+e) x = x*g+b return x def lnlstm(xs, ms, s, scope, nh, init_scale=1.0): nbatch, nin = 
[v.value for v in xs[0].get_shape()] with tf.variable_scope(scope): wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale)) gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0)) bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0)) wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale)) gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0)) bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0)) b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0)) gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0)) bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0)) c, h = tf.split(axis=1, num_or_size_splits=2, value=s) for idx, (x, m) in enumerate(zip(xs, ms)): c = c*(1-m) h = h*(1-m) z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z) i = tf.nn.sigmoid(i) f = tf.nn.sigmoid(f) o = tf.nn.sigmoid(o) u = tf.tanh(u) c = f*c + i*u h = o*tf.tanh(_ln(c, gc, bc)) xs[idx] = h s = tf.concat(axis=1, values=[c, h]) return xs, s def conv_to_fc(x): nh = np.prod([v.value for v in x.get_shape()[1:]]) x = tf.reshape(x, [-1, nh]) return x def discount_with_dones(rewards, dones, gamma): discounted = [] r = 0 for reward, done in zip(rewards[::-1], dones[::-1]): r = reward + gamma*r*(1.-done) # fixed off by one bug discounted.append(r) return discounted[::-1] def find_trainable_variables(key): return tf.trainable_variables(key) def make_path(f): return os.makedirs(f, exist_ok=True) def constant(p): return 1 def linear(p): return 1-p def middle_drop(p): eps = 0.75 if 1-p<eps: return eps*0.1 return 1-p def double_linear_con(p): p *= 2 eps = 0.125 if 1-p<eps: return eps return 1-p def double_middle_drop(p): eps1 = 0.75 eps2 = 0.25 if 1-p<eps1: if 1-p<eps2: return eps2*0.5 return eps1*0.1 return 1-p schedules = { 'linear':linear, 'constant':constant, 'double_linear_con': double_linear_con, 'middle_drop': middle_drop, 'double_middle_drop': double_middle_drop } class Scheduler(object): def __init__(self, v, nvalues, schedule): self.n = 0. self.v = v self.nvalues = nvalues self.schedule = schedules[schedule] def value(self): current_value = self.v*self.schedule(self.n/self.nvalues) self.n += 1. 
return current_value def value_steps(self, steps): return self.v*self.schedule(steps/self.nvalues) class EpisodeStats: def __init__(self, nsteps, nenvs): self.episode_rewards = [] for i in range(nenvs): self.episode_rewards.append([]) self.lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths self.rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards self.nsteps = nsteps self.nenvs = nenvs def feed(self, rewards, masks): rewards = np.reshape(rewards, [self.nenvs, self.nsteps]) masks = np.reshape(masks, [self.nenvs, self.nsteps]) for i in range(0, self.nenvs): for j in range(0, self.nsteps): self.episode_rewards[i].append(rewards[i][j]) if masks[i][j]: l = len(self.episode_rewards[i]) s = sum(self.episode_rewards[i]) self.lenbuffer.append(l) self.rewbuffer.append(s) self.episode_rewards[i] = [] def mean_length(self): if self.lenbuffer: return np.mean(self.lenbuffer) else: return 0 # on the first params dump, no episodes are finished def mean_reward(self): if self.rewbuffer: return np.mean(self.rewbuffer) else: return 0 # For ACER def get_by_index(x, idx): assert(len(x.get_shape()) == 2) assert(len(idx.get_shape()) == 1) idx_flattened = tf.range(0, x.shape[0]) * x.shape[1] + idx y = tf.gather(tf.reshape(x, [-1]), # flatten input idx_flattened) # use flattened indices return y def check_shape(ts,shapes): i = 0 for (t,shape) in zip(ts,shapes): assert t.get_shape().as_list()==shape, "id " + str(i) + " shape " + str(t.get_shape()) + str(shape) i += 1 def avg_norm(t): return tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(t), axis=-1))) def gradient_add(g1, g2, param): print([g1, g2, param.name]) assert (not (g1 is None and g2 is None)), param.name if g1 is None: return g2 elif g2 is None: return g1 else: return g1 + g2 def q_explained_variance(qpred, q): _, vary = tf.nn.moments(q, axes=[0, 1]) _, varpred = tf.nn.moments(q - qpred, axes=[0, 1]) check_shape([vary, varpred], [[]] * 2) return 1.0 - (varpred / vary)
9,348
32.035336
107
py
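A minimal standalone illustration of the discounting logic implemented by discount_with_dones in the a2c/utils.py record above; the function name and the numeric values below are hypothetical and chosen only to show how a done flag resets the running return.

def discount_with_dones_demo(rewards, dones, gamma):
    # Accumulate returns backwards, zeroing the running return at episode boundaries.
    discounted, r = [], 0.0
    for reward, done in zip(rewards[::-1], dones[::-1]):
        r = reward + gamma * r * (1.0 - done)
        discounted.append(r)
    return discounted[::-1]

# The done flag on the second step cuts the first two returns off from the
# reward earned after the episode boundary.
print(discount_with_dones_demo([1.0, 1.0, 1.0], [0, 1, 0], gamma=0.5))  # [1.5, 1.0, 1.0]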
baselines
baselines-master/baselines/a2c/runner.py
import numpy as np
from baselines.a2c.utils import discount_with_dones
from baselines.common.runners import AbstractEnvRunner

class Runner(AbstractEnvRunner):
    """
    We use this class to generate batches of experiences

    __init__:
    - Initialize the runner

    run():
    - Make a mini batch of experiences
    """
    def __init__(self, env, model, nsteps=5, gamma=0.99):
        super().__init__(env=env, model=model, nsteps=nsteps)
        self.gamma = gamma
        self.batch_action_shape = [x if x is not None else -1 for x in model.train_model.action.shape.as_list()]
        self.ob_dtype = model.train_model.X.dtype.as_numpy_dtype

    def run(self):
        # We initialize the lists that will contain the mb of experiences
        mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[]
        mb_states = self.states
        epinfos = []
        for n in range(self.nsteps):
            # Given observations, take action and value (V(s))
            # We already have self.obs because the Runner superclass runs self.obs[:] = env.reset() on init
            actions, values, states, _ = self.model.step(self.obs, S=self.states, M=self.dones)

            # Append the experiences
            mb_obs.append(np.copy(self.obs))
            mb_actions.append(actions)
            mb_values.append(values)
            mb_dones.append(self.dones)

            # Take actions in env and look at the results
            obs, rewards, dones, infos = self.env.step(actions)
            for info in infos:
                maybeepinfo = info.get('episode')
                if maybeepinfo: epinfos.append(maybeepinfo)
            self.states = states
            self.dones = dones
            self.obs = obs
            mb_rewards.append(rewards)
        mb_dones.append(self.dones)

        # Batch of steps to batch of rollouts
        mb_obs = np.asarray(mb_obs, dtype=self.ob_dtype).swapaxes(1, 0).reshape(self.batch_ob_shape)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
        mb_actions = np.asarray(mb_actions, dtype=self.model.train_model.action.dtype.name).swapaxes(1, 0)
        mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0)
        mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)
        mb_masks = mb_dones[:, :-1]
        mb_dones = mb_dones[:, 1:]

        if self.gamma > 0.0:
            # Discount/bootstrap off value fn
            last_values = self.model.value(self.obs, S=self.states, M=self.dones).tolist()
            for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
                rewards = rewards.tolist()
                dones = dones.tolist()
                if dones[-1] == 0:
                    rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1]
                else:
                    rewards = discount_with_dones(rewards, dones, self.gamma)

                mb_rewards[n] = rewards

        mb_actions = mb_actions.reshape(self.batch_action_shape)

        mb_rewards = mb_rewards.flatten()
        mb_values = mb_values.flatten()
        mb_masks = mb_masks.flatten()
        return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values, epinfos
3,241
41.103896
112
py
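A standalone sketch of the bootstrapping step in Runner.run above: when the rollout is truncated rather than terminal, the critic's value estimate for the last observation is appended before discounting and dropped afterwards. The helper and all numbers are illustrative, not the repo's code path.

def discount(rewards, dones, gamma):
    out, r = [], 0.0
    for rew, done in zip(rewards[::-1], dones[::-1]):
        r = rew + gamma * r * (1.0 - done)
        out.append(r)
    return out[::-1]

rewards, dones = [1.0, 1.0], [0, 0]     # rollout truncated, not terminal
last_value, gamma = 10.0, 0.5           # hypothetical V(s_T) from the critic
returns = discount(rewards + [last_value], dones + [0], gamma)[:-1]
print(returns)                          # [4.0, 6.0]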
baselines
baselines-master/baselines/a2c/__init__.py
0
0
0
py
baselines
baselines-master/baselines/gail/behavior_clone.py
''' The code is used to train BC imitator, or pretrained GAIL imitator ''' import argparse import tempfile import os.path as osp import gym import logging from tqdm import tqdm import tensorflow as tf from baselines.gail import mlp_policy from baselines import bench from baselines import logger from baselines.common import set_global_seeds, tf_util as U from baselines.common.misc_util import boolean_flag from baselines.common.mpi_adam import MpiAdam from baselines.gail.run_mujoco import runner from baselines.gail.dataset.mujoco_dset import Mujoco_Dset def argsparser(): parser = argparse.ArgumentParser("Tensorflow Implementation of Behavior Cloning") parser.add_argument('--env_id', help='environment ID', default='Hopper-v2') parser.add_argument('--seed', help='RNG seed', type=int, default=0) parser.add_argument('--expert_path', type=str, default='data/deterministic.trpo.Hopper.0.00.npz') parser.add_argument('--checkpoint_dir', help='the directory to save model', default='checkpoint') parser.add_argument('--log_dir', help='the directory to save log file', default='log') # Mujoco Dataset Configuration parser.add_argument('--traj_limitation', type=int, default=-1) # Network Configuration (Using MLP Policy) parser.add_argument('--policy_hidden_size', type=int, default=100) # for evaluatation boolean_flag(parser, 'stochastic_policy', default=False, help='use stochastic/deterministic policy to evaluate') boolean_flag(parser, 'save_sample', default=False, help='save the trajectories or not') parser.add_argument('--BC_max_iter', help='Max iteration for training BC', type=int, default=1e5) return parser.parse_args() def learn(env, policy_func, dataset, optim_batch_size=128, max_iters=1e4, adam_epsilon=1e-5, optim_stepsize=3e-4, ckpt_dir=None, log_dir=None, task_name=None, verbose=False): val_per_iter = int(max_iters/10) ob_space = env.observation_space ac_space = env.action_space pi = policy_func("pi", ob_space, ac_space) # Construct network for new policy # placeholder ob = U.get_placeholder_cached(name="ob") ac = pi.pdtype.sample_placeholder([None]) stochastic = U.get_placeholder_cached(name="stochastic") loss = tf.reduce_mean(tf.square(ac-pi.ac)) var_list = pi.get_trainable_variables() adam = MpiAdam(var_list, epsilon=adam_epsilon) lossandgrad = U.function([ob, ac, stochastic], [loss]+[U.flatgrad(loss, var_list)]) U.initialize() adam.sync() logger.log("Pretraining with Behavior Cloning...") for iter_so_far in tqdm(range(int(max_iters))): ob_expert, ac_expert = dataset.get_next_batch(optim_batch_size, 'train') train_loss, g = lossandgrad(ob_expert, ac_expert, True) adam.update(g, optim_stepsize) if verbose and iter_so_far % val_per_iter == 0: ob_expert, ac_expert = dataset.get_next_batch(-1, 'val') val_loss, _ = lossandgrad(ob_expert, ac_expert, True) logger.log("Training loss: {}, Validation loss: {}".format(train_loss, val_loss)) if ckpt_dir is None: savedir_fname = tempfile.TemporaryDirectory().name else: savedir_fname = osp.join(ckpt_dir, task_name) U.save_variables(savedir_fname, variables=pi.get_variables()) return savedir_fname def get_task_name(args): task_name = 'BC' task_name += '.{}'.format(args.env_id.split("-")[0]) task_name += '.traj_limitation_{}'.format(args.traj_limitation) task_name += ".seed_{}".format(args.seed) return task_name def main(args): U.make_session(num_cpu=1).__enter__() set_global_seeds(args.seed) env = gym.make(args.env_id) def policy_fn(name, ob_space, ac_space, reuse=False): return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space, reuse=reuse, 
hid_size=args.policy_hidden_size, num_hid_layers=2) env = bench.Monitor(env, logger.get_dir() and osp.join(logger.get_dir(), "monitor.json")) env.seed(args.seed) gym.logger.setLevel(logging.WARN) task_name = get_task_name(args) args.checkpoint_dir = osp.join(args.checkpoint_dir, task_name) args.log_dir = osp.join(args.log_dir, task_name) dataset = Mujoco_Dset(expert_path=args.expert_path, traj_limitation=args.traj_limitation) savedir_fname = learn(env, policy_fn, dataset, max_iters=args.BC_max_iter, ckpt_dir=args.checkpoint_dir, log_dir=args.log_dir, task_name=task_name, verbose=True) avg_len, avg_ret = runner(env, policy_fn, savedir_fname, timesteps_per_batch=1024, number_trajs=10, stochastic_policy=args.stochastic_policy, save=args.save_sample, reuse=True) if __name__ == '__main__': args = argsparser() main(args)
5,195
40.568
116
py
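A self-contained sketch of the behavior-cloning objective that learn() above minimizes: a mean-squared-error regression of predicted actions onto expert actions. The linear "policy" and the synthetic data are illustrative stand-ins, not the repo's MlpPolicy or MpiAdam.

import numpy as np

rng = np.random.RandomState(0)
obs = rng.randn(256, 4)                                   # hypothetical expert observations
true_w = np.array([[0.5], [-1.0], [2.0], [0.0]])
acs = obs @ true_w                                        # hypothetical expert actions
W = np.zeros((4, 1))
for _ in range(500):
    pred = obs @ W
    grad = 2.0 * obs.T @ (pred - acs) / len(obs)          # gradient of the MSE loss
    W -= 0.1 * grad                                       # plain gradient step
print(np.round(W.ravel(), 3))                             # recovers [0.5, -1.0, 2.0, 0.0]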
baselines
baselines-master/baselines/gail/adversary.py
''' Reference: https://github.com/openai/imitation I follow the architecture from the official repository ''' import tensorflow as tf import numpy as np from baselines.common.mpi_running_mean_std import RunningMeanStd from baselines.common import tf_util as U def logsigmoid(a): '''Equivalent to tf.log(tf.sigmoid(a))''' return -tf.nn.softplus(-a) """ Reference: https://github.com/openai/imitation/blob/99fbccf3e060b6e6c739bdf209758620fcdefd3c/policyopt/thutil.py#L48-L51""" def logit_bernoulli_entropy(logits): ent = (1.-tf.nn.sigmoid(logits))*logits - logsigmoid(logits) return ent class TransitionClassifier(object): def __init__(self, env, hidden_size, entcoeff=0.001, lr_rate=1e-3, scope="adversary"): self.scope = scope self.observation_shape = env.observation_space.shape self.actions_shape = env.action_space.shape self.input_shape = tuple([o+a for o, a in zip(self.observation_shape, self.actions_shape)]) self.num_actions = env.action_space.shape[0] self.hidden_size = hidden_size self.build_ph() # Build grpah generator_logits = self.build_graph(self.generator_obs_ph, self.generator_acs_ph, reuse=False) expert_logits = self.build_graph(self.expert_obs_ph, self.expert_acs_ph, reuse=True) # Build accuracy generator_acc = tf.reduce_mean(tf.to_float(tf.nn.sigmoid(generator_logits) < 0.5)) expert_acc = tf.reduce_mean(tf.to_float(tf.nn.sigmoid(expert_logits) > 0.5)) # Build regression loss # let x = logits, z = targets. # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) generator_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=generator_logits, labels=tf.zeros_like(generator_logits)) generator_loss = tf.reduce_mean(generator_loss) expert_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=expert_logits, labels=tf.ones_like(expert_logits)) expert_loss = tf.reduce_mean(expert_loss) # Build entropy loss logits = tf.concat([generator_logits, expert_logits], 0) entropy = tf.reduce_mean(logit_bernoulli_entropy(logits)) entropy_loss = -entcoeff*entropy # Loss + Accuracy terms self.losses = [generator_loss, expert_loss, entropy, entropy_loss, generator_acc, expert_acc] self.loss_name = ["generator_loss", "expert_loss", "entropy", "entropy_loss", "generator_acc", "expert_acc"] self.total_loss = generator_loss + expert_loss + entropy_loss # Build Reward for policy self.reward_op = -tf.log(1-tf.nn.sigmoid(generator_logits)+1e-8) var_list = self.get_trainable_variables() self.lossandgrad = U.function([self.generator_obs_ph, self.generator_acs_ph, self.expert_obs_ph, self.expert_acs_ph], self.losses + [U.flatgrad(self.total_loss, var_list)]) def build_ph(self): self.generator_obs_ph = tf.placeholder(tf.float32, (None, ) + self.observation_shape, name="observations_ph") self.generator_acs_ph = tf.placeholder(tf.float32, (None, ) + self.actions_shape, name="actions_ph") self.expert_obs_ph = tf.placeholder(tf.float32, (None, ) + self.observation_shape, name="expert_observations_ph") self.expert_acs_ph = tf.placeholder(tf.float32, (None, ) + self.actions_shape, name="expert_actions_ph") def build_graph(self, obs_ph, acs_ph, reuse=False): with tf.variable_scope(self.scope): if reuse: tf.get_variable_scope().reuse_variables() with tf.variable_scope("obfilter"): self.obs_rms = RunningMeanStd(shape=self.observation_shape) obs = (obs_ph - self.obs_rms.mean) / self.obs_rms.std _input = tf.concat([obs, acs_ph], axis=1) # concatenate the two input -> form a transition p_h1 = tf.contrib.layers.fully_connected(_input, self.hidden_size, activation_fn=tf.nn.tanh) p_h2 = tf.contrib.layers.fully_connected(p_h1, 
self.hidden_size, activation_fn=tf.nn.tanh) logits = tf.contrib.layers.fully_connected(p_h2, 1, activation_fn=tf.identity) return logits def get_trainable_variables(self): return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope) def get_reward(self, obs, acs): sess = tf.get_default_session() if len(obs.shape) == 1: obs = np.expand_dims(obs, 0) if len(acs.shape) == 1: acs = np.expand_dims(acs, 0) feed_dict = {self.generator_obs_ph: obs, self.generator_acs_ph: acs} reward = sess.run(self.reward_op, feed_dict) return reward
4,674
52.125
129
py
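A small numeric illustration of the reward surrogate defined in TransitionClassifier above, reward = -log(1 - sigmoid(logits)): the more "expert-like" the discriminator judges a transition, the larger the reward handed to the policy. The logit values are made up.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

logits = np.array([-2.0, 0.0, 2.0])            # discriminator scores for three transitions
reward = -np.log(1.0 - sigmoid(logits) + 1e-8)
print(np.round(reward, 3))                     # [0.127 0.693 2.127]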
baselines
baselines-master/baselines/gail/run_mujoco.py
''' Disclaimer: this code is highly based on trpo_mpi at @openai/baselines and @openai/imitation ''' import argparse import os.path as osp import logging from mpi4py import MPI from tqdm import tqdm import numpy as np import gym from baselines.gail import mlp_policy from baselines.common import set_global_seeds, tf_util as U from baselines.common.misc_util import boolean_flag from baselines import bench from baselines import logger from baselines.gail.dataset.mujoco_dset import Mujoco_Dset from baselines.gail.adversary import TransitionClassifier def argsparser(): parser = argparse.ArgumentParser("Tensorflow Implementation of GAIL") parser.add_argument('--env_id', help='environment ID', default='Hopper-v2') parser.add_argument('--seed', help='RNG seed', type=int, default=0) parser.add_argument('--expert_path', type=str, default='data/deterministic.trpo.Hopper.0.00.npz') parser.add_argument('--checkpoint_dir', help='the directory to save model', default='checkpoint') parser.add_argument('--log_dir', help='the directory to save log file', default='log') parser.add_argument('--load_model_path', help='if provided, load the model', type=str, default=None) # Task parser.add_argument('--task', type=str, choices=['train', 'evaluate', 'sample'], default='train') # for evaluatation boolean_flag(parser, 'stochastic_policy', default=False, help='use stochastic/deterministic policy to evaluate') boolean_flag(parser, 'save_sample', default=False, help='save the trajectories or not') # Mujoco Dataset Configuration parser.add_argument('--traj_limitation', type=int, default=-1) # Optimization Configuration parser.add_argument('--g_step', help='number of steps to train policy in each epoch', type=int, default=3) parser.add_argument('--d_step', help='number of steps to train discriminator in each epoch', type=int, default=1) # Network Configuration (Using MLP Policy) parser.add_argument('--policy_hidden_size', type=int, default=100) parser.add_argument('--adversary_hidden_size', type=int, default=100) # Algorithms Configuration parser.add_argument('--algo', type=str, choices=['trpo', 'ppo'], default='trpo') parser.add_argument('--max_kl', type=float, default=0.01) parser.add_argument('--policy_entcoeff', help='entropy coefficiency of policy', type=float, default=0) parser.add_argument('--adversary_entcoeff', help='entropy coefficiency of discriminator', type=float, default=1e-3) # Traing Configuration parser.add_argument('--save_per_iter', help='save model every xx iterations', type=int, default=100) parser.add_argument('--num_timesteps', help='number of timesteps per episode', type=int, default=5e6) # Behavior Cloning boolean_flag(parser, 'pretrained', default=False, help='Use BC to pretrain') parser.add_argument('--BC_max_iter', help='Max iteration for training BC', type=int, default=1e4) return parser.parse_args() def get_task_name(args): task_name = args.algo + "_gail." if args.pretrained: task_name += "with_pretrained." if args.traj_limitation != np.inf: task_name += "transition_limitation_%d." 
% args.traj_limitation task_name += args.env_id.split("-")[0] task_name = task_name + ".g_step_" + str(args.g_step) + ".d_step_" + str(args.d_step) + \ ".policy_entcoeff_" + str(args.policy_entcoeff) + ".adversary_entcoeff_" + str(args.adversary_entcoeff) task_name += ".seed_" + str(args.seed) return task_name def main(args): U.make_session(num_cpu=1).__enter__() set_global_seeds(args.seed) env = gym.make(args.env_id) def policy_fn(name, ob_space, ac_space, reuse=False): return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space, reuse=reuse, hid_size=args.policy_hidden_size, num_hid_layers=2) env = bench.Monitor(env, logger.get_dir() and osp.join(logger.get_dir(), "monitor.json")) env.seed(args.seed) gym.logger.setLevel(logging.WARN) task_name = get_task_name(args) args.checkpoint_dir = osp.join(args.checkpoint_dir, task_name) args.log_dir = osp.join(args.log_dir, task_name) if args.task == 'train': dataset = Mujoco_Dset(expert_path=args.expert_path, traj_limitation=args.traj_limitation) reward_giver = TransitionClassifier(env, args.adversary_hidden_size, entcoeff=args.adversary_entcoeff) train(env, args.seed, policy_fn, reward_giver, dataset, args.algo, args.g_step, args.d_step, args.policy_entcoeff, args.num_timesteps, args.save_per_iter, args.checkpoint_dir, args.log_dir, args.pretrained, args.BC_max_iter, task_name ) elif args.task == 'evaluate': runner(env, policy_fn, args.load_model_path, timesteps_per_batch=1024, number_trajs=10, stochastic_policy=args.stochastic_policy, save=args.save_sample ) else: raise NotImplementedError env.close() def train(env, seed, policy_fn, reward_giver, dataset, algo, g_step, d_step, policy_entcoeff, num_timesteps, save_per_iter, checkpoint_dir, log_dir, pretrained, BC_max_iter, task_name=None): pretrained_weight = None if pretrained and (BC_max_iter > 0): # Pretrain with behavior cloning from baselines.gail import behavior_clone pretrained_weight = behavior_clone.learn(env, policy_fn, dataset, max_iters=BC_max_iter) if algo == 'trpo': from baselines.gail import trpo_mpi # Set up for MPI seed rank = MPI.COMM_WORLD.Get_rank() if rank != 0: logger.set_level(logger.DISABLED) workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank() set_global_seeds(workerseed) env.seed(workerseed) trpo_mpi.learn(env, policy_fn, reward_giver, dataset, rank, pretrained=pretrained, pretrained_weight=pretrained_weight, g_step=g_step, d_step=d_step, entcoeff=policy_entcoeff, max_timesteps=num_timesteps, ckpt_dir=checkpoint_dir, log_dir=log_dir, save_per_iter=save_per_iter, timesteps_per_batch=1024, max_kl=0.01, cg_iters=10, cg_damping=0.1, gamma=0.995, lam=0.97, vf_iters=5, vf_stepsize=1e-3, task_name=task_name) else: raise NotImplementedError def runner(env, policy_func, load_model_path, timesteps_per_batch, number_trajs, stochastic_policy, save=False, reuse=False): # Setup network # ---------------------------------------- ob_space = env.observation_space ac_space = env.action_space pi = policy_func("pi", ob_space, ac_space, reuse=reuse) U.initialize() # Prepare for rollouts # ---------------------------------------- U.load_variables(load_model_path) obs_list = [] acs_list = [] len_list = [] ret_list = [] for _ in tqdm(range(number_trajs)): traj = traj_1_generator(pi, env, timesteps_per_batch, stochastic=stochastic_policy) obs, acs, ep_len, ep_ret = traj['ob'], traj['ac'], traj['ep_len'], traj['ep_ret'] obs_list.append(obs) acs_list.append(acs) len_list.append(ep_len) ret_list.append(ep_ret) if stochastic_policy: print('stochastic policy:') else: 
print('deterministic policy:') if save: filename = load_model_path.split('/')[-1] + '.' + env.spec.id np.savez(filename, obs=np.array(obs_list), acs=np.array(acs_list), lens=np.array(len_list), rets=np.array(ret_list)) avg_len = sum(len_list)/len(len_list) avg_ret = sum(ret_list)/len(ret_list) print("Average length:", avg_len) print("Average return:", avg_ret) return avg_len, avg_ret # Sample one trajectory (until trajectory end) def traj_1_generator(pi, env, horizon, stochastic): t = 0 ac = env.action_space.sample() # not used, just so we have the datatype new = True # marks if we're on first timestep of an episode ob = env.reset() cur_ep_ret = 0 # return in current episode cur_ep_len = 0 # len of current episode # Initialize history arrays obs = [] rews = [] news = [] acs = [] while True: ac, vpred = pi.act(stochastic, ob) obs.append(ob) news.append(new) acs.append(ac) ob, rew, new, _ = env.step(ac) rews.append(rew) cur_ep_ret += rew cur_ep_len += 1 if new or t >= horizon: break t += 1 obs = np.array(obs) rews = np.array(rews) news = np.array(news) acs = np.array(acs) traj = {"ob": obs, "rew": rews, "new": news, "ac": acs, "ep_ret": cur_ep_ret, "ep_len": cur_ep_len} return traj if __name__ == '__main__': args = argsparser() main(args)
9,366
38.029167
119
py
baselines
baselines-master/baselines/gail/gail-eval.py
''' This code is used to evalaute the imitators trained with different number of trajectories and plot the results in the same figure for easy comparison. ''' import argparse import os import glob import gym import matplotlib.pyplot as plt import numpy as np import tensorflow as tf from baselines.gail import run_mujoco from baselines.gail import mlp_policy from baselines.common import set_global_seeds, tf_util as U from baselines.common.misc_util import boolean_flag from baselines.gail.dataset.mujoco_dset import Mujoco_Dset plt.style.use('ggplot') CONFIG = { 'traj_limitation': [1, 5, 10, 50], } def load_dataset(expert_path): dataset = Mujoco_Dset(expert_path=expert_path) return dataset def argsparser(): parser = argparse.ArgumentParser('Do evaluation') parser.add_argument('--seed', type=int, default=0) parser.add_argument('--policy_hidden_size', type=int, default=100) parser.add_argument('--env', type=str, choices=['Hopper', 'Walker2d', 'HalfCheetah', 'Humanoid', 'HumanoidStandup']) boolean_flag(parser, 'stochastic_policy', default=False, help='use stochastic/deterministic policy to evaluate') return parser.parse_args() def evaluate_env(env_name, seed, policy_hidden_size, stochastic, reuse, prefix): def get_checkpoint_dir(checkpoint_list, limit, prefix): for checkpoint in checkpoint_list: if ('limitation_'+str(limit) in checkpoint) and (prefix in checkpoint): return checkpoint return None def policy_fn(name, ob_space, ac_space, reuse=False): return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space, reuse=reuse, hid_size=policy_hidden_size, num_hid_layers=2) data_path = os.path.join('data', 'deterministic.trpo.' + env_name + '.0.00.npz') dataset = load_dataset(data_path) checkpoint_list = glob.glob(os.path.join('checkpoint', '*' + env_name + ".*")) log = { 'traj_limitation': [], 'upper_bound': [], 'avg_ret': [], 'avg_len': [], 'normalized_ret': [] } for i, limit in enumerate(CONFIG['traj_limitation']): # Do one evaluation upper_bound = sum(dataset.rets[:limit])/limit checkpoint_dir = get_checkpoint_dir(checkpoint_list, limit, prefix=prefix) checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir) env = gym.make(env_name + '-v1') env.seed(seed) print('Trajectory limitation: {}, Load checkpoint: {}, '.format(limit, checkpoint_path)) avg_len, avg_ret = run_mujoco.runner(env, policy_fn, checkpoint_path, timesteps_per_batch=1024, number_trajs=10, stochastic_policy=stochastic, reuse=((i != 0) or reuse)) normalized_ret = avg_ret/upper_bound print('Upper bound: {}, evaluation returns: {}, normalized scores: {}'.format( upper_bound, avg_ret, normalized_ret)) log['traj_limitation'].append(limit) log['upper_bound'].append(upper_bound) log['avg_ret'].append(avg_ret) log['avg_len'].append(avg_len) log['normalized_ret'].append(normalized_ret) env.close() return log def plot(env_name, bc_log, gail_log, stochastic): upper_bound = bc_log['upper_bound'] bc_avg_ret = bc_log['avg_ret'] gail_avg_ret = gail_log['avg_ret'] plt.plot(CONFIG['traj_limitation'], upper_bound) plt.plot(CONFIG['traj_limitation'], bc_avg_ret) plt.plot(CONFIG['traj_limitation'], gail_avg_ret) plt.xlabel('Number of expert trajectories') plt.ylabel('Accumulated reward') plt.title('{} unnormalized scores'.format(env_name)) plt.legend(['expert', 'bc-imitator', 'gail-imitator'], loc='lower right') plt.grid(b=True, which='major', color='gray', linestyle='--') if stochastic: title_name = 'result/{}-unnormalized-stochastic-scores.png'.format(env_name) else: title_name = 
'result/{}-unnormalized-deterministic-scores.png'.format(env_name) plt.savefig(title_name) plt.close() bc_normalized_ret = bc_log['normalized_ret'] gail_normalized_ret = gail_log['normalized_ret'] plt.plot(CONFIG['traj_limitation'], np.ones(len(CONFIG['traj_limitation']))) plt.plot(CONFIG['traj_limitation'], bc_normalized_ret) plt.plot(CONFIG['traj_limitation'], gail_normalized_ret) plt.xlabel('Number of expert trajectories') plt.ylabel('Normalized performance') plt.title('{} normalized scores'.format(env_name)) plt.legend(['expert', 'bc-imitator', 'gail-imitator'], loc='lower right') plt.grid(b=True, which='major', color='gray', linestyle='--') if stochastic: title_name = 'result/{}-normalized-stochastic-scores.png'.format(env_name) else: title_name = 'result/{}-normalized-deterministic-scores.png'.format(env_name) plt.ylim(0, 1.6) plt.savefig(title_name) plt.close() def main(args): U.make_session(num_cpu=1).__enter__() set_global_seeds(args.seed) print('Evaluating {}'.format(args.env)) bc_log = evaluate_env(args.env, args.seed, args.policy_hidden_size, args.stochastic_policy, False, 'BC') print('Evaluation for {}'.format(args.env)) print(bc_log) gail_log = evaluate_env(args.env, args.seed, args.policy_hidden_size, args.stochastic_policy, True, 'gail') print('Evaluation for {}'.format(args.env)) print(gail_log) plot(args.env, bc_log, gail_log, args.stochastic_policy) if __name__ == '__main__': args = argsparser() main(args)
5,886
38.777027
116
py
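A tiny worked example, with made-up returns, of the normalized score computed in evaluate_env above: the imitator's average return divided by the expert's average return over the same number of demonstration trajectories.

expert_rets = [3600.0, 3500.0, 3700.0, 3650.0]   # hypothetical expert episode returns
limit = 2                                        # traj_limitation used for training
upper_bound = sum(expert_rets[:limit]) / limit   # 3550.0
avg_ret = 3200.0                                 # hypothetical imitator return
print(avg_ret / upper_bound)                     # ~0.901, the "normalized_ret"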
baselines
baselines-master/baselines/gail/mlp_policy.py
''' from baselines/ppo1/mlp_policy.py and add simple modification (1) add reuse argument (2) cache the `stochastic` placeholder ''' import tensorflow as tf import gym import baselines.common.tf_util as U from baselines.common.mpi_running_mean_std import RunningMeanStd from baselines.common.distributions import make_pdtype from baselines.acktr.utils import dense class MlpPolicy(object): recurrent = False def __init__(self, name, reuse=False, *args, **kwargs): with tf.variable_scope(name): if reuse: tf.get_variable_scope().reuse_variables() self._init(*args, **kwargs) self.scope = tf.get_variable_scope().name def _init(self, ob_space, ac_space, hid_size, num_hid_layers, gaussian_fixed_var=True): assert isinstance(ob_space, gym.spaces.Box) self.pdtype = pdtype = make_pdtype(ac_space) sequence_length = None ob = U.get_placeholder(name="ob", dtype=tf.float32, shape=[sequence_length] + list(ob_space.shape)) with tf.variable_scope("obfilter"): self.ob_rms = RunningMeanStd(shape=ob_space.shape) obz = tf.clip_by_value((ob - self.ob_rms.mean) / self.ob_rms.std, -5.0, 5.0) last_out = obz for i in range(num_hid_layers): last_out = tf.nn.tanh(dense(last_out, hid_size, "vffc%i" % (i+1), weight_init=U.normc_initializer(1.0))) self.vpred = dense(last_out, 1, "vffinal", weight_init=U.normc_initializer(1.0))[:, 0] last_out = obz for i in range(num_hid_layers): last_out = tf.nn.tanh(dense(last_out, hid_size, "polfc%i" % (i+1), weight_init=U.normc_initializer(1.0))) if gaussian_fixed_var and isinstance(ac_space, gym.spaces.Box): mean = dense(last_out, pdtype.param_shape()[0]//2, "polfinal", U.normc_initializer(0.01)) logstd = tf.get_variable(name="logstd", shape=[1, pdtype.param_shape()[0]//2], initializer=tf.zeros_initializer()) pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1) else: pdparam = dense(last_out, pdtype.param_shape()[0], "polfinal", U.normc_initializer(0.01)) self.pd = pdtype.pdfromflat(pdparam) self.state_in = [] self.state_out = [] # change for BC stochastic = U.get_placeholder(name="stochastic", dtype=tf.bool, shape=()) ac = U.switch(stochastic, self.pd.sample(), self.pd.mode()) self.ac = ac self._act = U.function([stochastic, ob], [ac, self.vpred]) def act(self, stochastic, ob): ac1, vpred1 = self._act(stochastic, ob[None]) return ac1[0], vpred1[0] def get_variables(self): return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.scope) def get_trainable_variables(self): return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope) def get_initial_state(self): return []
2,930
37.565789
126
py
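A standalone sketch of the action distribution MlpPolicy builds when gaussian_fixed_var=True: a state-dependent mean from the network plus a single learned, state-independent log standard deviation. The arrays below are placeholders for what the network output and the logstd variable would hold.

import numpy as np

rng = np.random.RandomState(0)
mean = np.array([0.2, -0.1])        # hypothetical network output for one observation
logstd = np.array([-0.5, -0.5])     # learned variable, shared across all states
stochastic = True

action = mean + np.exp(logstd) * rng.randn(2) if stochastic else mean
print(action)                        # sampled action; with stochastic=False it is just the mean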
baselines
baselines-master/baselines/gail/statistics.py
'''
This code is based heavily on https://github.com/carpedm20/deep-rl-tensorflow/blob/master/agents/statistic.py
'''
import tensorflow as tf
import numpy as np

import baselines.common.tf_util as U


class stats():

    def __init__(self, scalar_keys=[], histogram_keys=[]):
        self.scalar_keys = scalar_keys
        self.histogram_keys = histogram_keys
        self.scalar_summaries = []
        self.scalar_summaries_ph = []
        self.histogram_summaries_ph = []
        self.histogram_summaries = []
        with tf.variable_scope('summary'):
            for k in scalar_keys:
                ph = tf.placeholder('float32', None, name=k+'.scalar.summary')
                sm = tf.summary.scalar(k+'.scalar.summary', ph)
                self.scalar_summaries_ph.append(ph)
                self.scalar_summaries.append(sm)
            for k in histogram_keys:
                ph = tf.placeholder('float32', None, name=k+'.histogram.summary')
                sm = tf.summary.scalar(k+'.histogram.summary', ph)
                self.histogram_summaries_ph.append(ph)
                self.histogram_summaries.append(sm)

        self.summaries = tf.summary.merge(self.scalar_summaries+self.histogram_summaries)

    def add_all_summary(self, writer, values, iter):
        # Note that the order of the incoming ```values``` should be the same as that of the
        # ```scalar_keys``` given in ```__init__```
        if np.sum(np.isnan(values)+0) != 0:
            return
        sess = U.get_session()
        keys = self.scalar_summaries_ph + self.histogram_summaries_ph
        feed_dict = {}
        for k, v in zip(keys, values):
            feed_dict.update({k: v})
        summaries_str = sess.run(self.summaries, feed_dict)
        writer.add_summary(summaries_str, iter)
1,802
38.195652
108
py
baselines
baselines-master/baselines/gail/__init__.py
0
0
0
py
baselines
baselines-master/baselines/gail/trpo_mpi.py
''' Disclaimer: The trpo part highly rely on trpo_mpi at @openai/baselines ''' import time import os from contextlib import contextmanager from mpi4py import MPI from collections import deque import tensorflow as tf import numpy as np import baselines.common.tf_util as U from baselines.common import explained_variance, zipsame, dataset, fmt_row from baselines import logger from baselines.common import colorize from baselines.common.mpi_adam import MpiAdam from baselines.common.cg import cg from baselines.gail.statistics import stats def traj_segment_generator(pi, env, reward_giver, horizon, stochastic): # Initialize state variables t = 0 ac = env.action_space.sample() new = True rew = 0.0 true_rew = 0.0 ob = env.reset() cur_ep_ret = 0 cur_ep_len = 0 cur_ep_true_ret = 0 ep_true_rets = [] ep_rets = [] ep_lens = [] # Initialize history arrays obs = np.array([ob for _ in range(horizon)]) true_rews = np.zeros(horizon, 'float32') rews = np.zeros(horizon, 'float32') vpreds = np.zeros(horizon, 'float32') news = np.zeros(horizon, 'int32') acs = np.array([ac for _ in range(horizon)]) prevacs = acs.copy() while True: prevac = ac ac, vpred = pi.act(stochastic, ob) # Slight weirdness here because we need value function at time T # before returning segment [0, T-1] so we get the correct # terminal value if t > 0 and t % horizon == 0: yield {"ob": obs, "rew": rews, "vpred": vpreds, "new": news, "ac": acs, "prevac": prevacs, "nextvpred": vpred * (1 - new), "ep_rets": ep_rets, "ep_lens": ep_lens, "ep_true_rets": ep_true_rets} _, vpred = pi.act(stochastic, ob) # Be careful!!! if you change the downstream algorithm to aggregate # several of these batches, then be sure to do a deepcopy ep_rets = [] ep_true_rets = [] ep_lens = [] i = t % horizon obs[i] = ob vpreds[i] = vpred news[i] = new acs[i] = ac prevacs[i] = prevac rew = reward_giver.get_reward(ob, ac) ob, true_rew, new, _ = env.step(ac) rews[i] = rew true_rews[i] = true_rew cur_ep_ret += rew cur_ep_true_ret += true_rew cur_ep_len += 1 if new: ep_rets.append(cur_ep_ret) ep_true_rets.append(cur_ep_true_ret) ep_lens.append(cur_ep_len) cur_ep_ret = 0 cur_ep_true_ret = 0 cur_ep_len = 0 ob = env.reset() t += 1 def add_vtarg_and_adv(seg, gamma, lam): new = np.append(seg["new"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1 vpred = np.append(seg["vpred"], seg["nextvpred"]) T = len(seg["rew"]) seg["adv"] = gaelam = np.empty(T, 'float32') rew = seg["rew"] lastgaelam = 0 for t in reversed(range(T)): nonterminal = 1-new[t+1] delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t] gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam seg["tdlamret"] = seg["adv"] + seg["vpred"] def learn(env, policy_func, reward_giver, expert_dataset, rank, pretrained, pretrained_weight, *, g_step, d_step, entcoeff, save_per_iter, ckpt_dir, log_dir, timesteps_per_batch, task_name, gamma, lam, max_kl, cg_iters, cg_damping=1e-2, vf_stepsize=3e-4, d_stepsize=3e-4, vf_iters=3, max_timesteps=0, max_episodes=0, max_iters=0, callback=None ): nworkers = MPI.COMM_WORLD.Get_size() rank = MPI.COMM_WORLD.Get_rank() np.set_printoptions(precision=3) # Setup losses and stuff # ---------------------------------------- ob_space = env.observation_space ac_space = env.action_space pi = policy_func("pi", ob_space, ac_space, reuse=(pretrained_weight != None)) oldpi = policy_func("oldpi", ob_space, ac_space) atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable) ret = 
tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return ob = U.get_placeholder_cached(name="ob") ac = pi.pdtype.sample_placeholder([None]) kloldnew = oldpi.pd.kl(pi.pd) ent = pi.pd.entropy() meankl = tf.reduce_mean(kloldnew) meanent = tf.reduce_mean(ent) entbonus = entcoeff * meanent vferr = tf.reduce_mean(tf.square(pi.vpred - ret)) ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # advantage * pnew / pold surrgain = tf.reduce_mean(ratio * atarg) optimgain = surrgain + entbonus losses = [optimgain, meankl, entbonus, surrgain, meanent] loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"] dist = meankl all_var_list = pi.get_trainable_variables() var_list = [v for v in all_var_list if v.name.startswith("pi/pol") or v.name.startswith("pi/logstd")] vf_var_list = [v for v in all_var_list if v.name.startswith("pi/vff")] assert len(var_list) == len(vf_var_list) + 1 d_adam = MpiAdam(reward_giver.get_trainable_variables()) vfadam = MpiAdam(vf_var_list) get_flat = U.GetFlat(var_list) set_from_flat = U.SetFromFlat(var_list) klgrads = tf.gradients(dist, var_list) flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name="flat_tan") shapes = [var.get_shape().as_list() for var in var_list] start = 0 tangents = [] for shape in shapes: sz = U.intprod(shape) tangents.append(tf.reshape(flat_tangent[start:start+sz], shape)) start += sz gvp = tf.add_n([tf.reduce_sum(g*tangent) for (g, tangent) in zipsame(klgrads, tangents)]) # pylint: disable=E1111 fvp = U.flatgrad(gvp, var_list) assign_old_eq_new = U.function([], [], updates=[tf.assign(oldv, newv) for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())]) compute_losses = U.function([ob, ac, atarg], losses) compute_lossandgrad = U.function([ob, ac, atarg], losses + [U.flatgrad(optimgain, var_list)]) compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp) compute_vflossandgrad = U.function([ob, ret], U.flatgrad(vferr, vf_var_list)) @contextmanager def timed(msg): if rank == 0: print(colorize(msg, color='magenta')) tstart = time.time() yield print(colorize("done in %.3f seconds" % (time.time() - tstart), color='magenta')) else: yield def allmean(x): assert isinstance(x, np.ndarray) out = np.empty_like(x) MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM) out /= nworkers return out U.initialize() th_init = get_flat() MPI.COMM_WORLD.Bcast(th_init, root=0) set_from_flat(th_init) d_adam.sync() vfadam.sync() if rank == 0: print("Init param sum", th_init.sum(), flush=True) # Prepare for rollouts # ---------------------------------------- seg_gen = traj_segment_generator(pi, env, reward_giver, timesteps_per_batch, stochastic=True) episodes_so_far = 0 timesteps_so_far = 0 iters_so_far = 0 tstart = time.time() lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards true_rewbuffer = deque(maxlen=40) assert sum([max_iters > 0, max_timesteps > 0, max_episodes > 0]) == 1 g_loss_stats = stats(loss_names) d_loss_stats = stats(reward_giver.loss_name) ep_stats = stats(["True_rewards", "Rewards", "Episode_length"]) # if provide pretrained weight if pretrained_weight is not None: U.load_state(pretrained_weight, var_list=pi.get_variables()) while True: if callback: callback(locals(), globals()) if max_timesteps and timesteps_so_far >= max_timesteps: break elif max_episodes and episodes_so_far >= max_episodes: break elif max_iters and iters_so_far >= max_iters: break # Save model if rank == 0 and iters_so_far % save_per_iter == 0 and ckpt_dir is not None: 
fname = os.path.join(ckpt_dir, task_name) os.makedirs(os.path.dirname(fname), exist_ok=True) saver = tf.train.Saver() saver.save(tf.get_default_session(), fname) logger.log("********** Iteration %i ************" % iters_so_far) def fisher_vector_product(p): return allmean(compute_fvp(p, *fvpargs)) + cg_damping * p # ------------------ Update G ------------------ logger.log("Optimizing Policy...") for _ in range(g_step): with timed("sampling"): seg = seg_gen.__next__() add_vtarg_and_adv(seg, gamma, lam) # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets)) ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"] vpredbefore = seg["vpred"] # predicted value function before udpate atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate if hasattr(pi, "ob_rms"): pi.ob_rms.update(ob) # update running mean/std for policy args = seg["ob"], seg["ac"], atarg fvpargs = [arr[::5] for arr in args] assign_old_eq_new() # set old parameter values to new parameter values with timed("computegrad"): *lossbefore, g = compute_lossandgrad(*args) lossbefore = allmean(np.array(lossbefore)) g = allmean(g) if np.allclose(g, 0): logger.log("Got zero gradient. not updating") else: with timed("cg"): stepdir = cg(fisher_vector_product, g, cg_iters=cg_iters, verbose=rank == 0) assert np.isfinite(stepdir).all() shs = .5*stepdir.dot(fisher_vector_product(stepdir)) lm = np.sqrt(shs / max_kl) # logger.log("lagrange multiplier:", lm, "gnorm:", np.linalg.norm(g)) fullstep = stepdir / lm expectedimprove = g.dot(fullstep) surrbefore = lossbefore[0] stepsize = 1.0 thbefore = get_flat() for _ in range(10): thnew = thbefore + fullstep * stepsize set_from_flat(thnew) meanlosses = surr, kl, *_ = allmean(np.array(compute_losses(*args))) improve = surr - surrbefore logger.log("Expected: %.3f Actual: %.3f" % (expectedimprove, improve)) if not np.isfinite(meanlosses).all(): logger.log("Got non-finite value of losses -- bad!") elif kl > max_kl * 1.5: logger.log("violated KL constraint. shrinking step.") elif improve < 0: logger.log("surrogate didn't improve. 
shrinking step.") else: logger.log("Stepsize OK!") break stepsize *= .5 else: logger.log("couldn't compute a good step") set_from_flat(thbefore) if nworkers > 1 and iters_so_far % 20 == 0: paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), vfadam.getflat().sum())) # list of tuples assert all(np.allclose(ps, paramsums[0]) for ps in paramsums[1:]) with timed("vf"): for _ in range(vf_iters): for (mbob, mbret) in dataset.iterbatches((seg["ob"], seg["tdlamret"]), include_final_partial_batch=False, batch_size=128): if hasattr(pi, "ob_rms"): pi.ob_rms.update(mbob) # update running mean/std for policy g = allmean(compute_vflossandgrad(mbob, mbret)) vfadam.update(g, vf_stepsize) g_losses = meanlosses for (lossname, lossval) in zip(loss_names, meanlosses): logger.record_tabular(lossname, lossval) logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret)) # ------------------ Update D ------------------ logger.log("Optimizing Discriminator...") logger.log(fmt_row(13, reward_giver.loss_name)) ob_expert, ac_expert = expert_dataset.get_next_batch(len(ob)) batch_size = len(ob) // d_step d_losses = [] # list of tuples, each of which gives the loss for a minibatch for ob_batch, ac_batch in dataset.iterbatches((ob, ac), include_final_partial_batch=False, batch_size=batch_size): ob_expert, ac_expert = expert_dataset.get_next_batch(len(ob_batch)) # update running mean/std for reward_giver if hasattr(reward_giver, "obs_rms"): reward_giver.obs_rms.update(np.concatenate((ob_batch, ob_expert), 0)) *newlosses, g = reward_giver.lossandgrad(ob_batch, ac_batch, ob_expert, ac_expert) d_adam.update(allmean(g), d_stepsize) d_losses.append(newlosses) logger.log(fmt_row(13, np.mean(d_losses, axis=0))) lrlocal = (seg["ep_lens"], seg["ep_rets"], seg["ep_true_rets"]) # local values listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples lens, rews, true_rets = map(flatten_lists, zip(*listoflrpairs)) true_rewbuffer.extend(true_rets) lenbuffer.extend(lens) rewbuffer.extend(rews) logger.record_tabular("EpLenMean", np.mean(lenbuffer)) logger.record_tabular("EpRewMean", np.mean(rewbuffer)) logger.record_tabular("EpTrueRewMean", np.mean(true_rewbuffer)) logger.record_tabular("EpThisIter", len(lens)) episodes_so_far += len(lens) timesteps_so_far += sum(lens) iters_so_far += 1 logger.record_tabular("EpisodesSoFar", episodes_so_far) logger.record_tabular("TimestepsSoFar", timesteps_so_far) logger.record_tabular("TimeElapsed", time.time() - tstart) if rank == 0: logger.dump_tabular() def flatten_lists(listoflists): return [el for list_ in listoflists for el in list_]
14,662
40.304225
124
py
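A self-contained numeric sketch of the generalized-advantage computation in add_vtarg_and_adv above, run on a hypothetical 3-step segment whose last step is terminal.

import numpy as np

gamma, lam = 0.99, 0.95
rew   = np.array([1.0, 1.0, 1.0])
vpred = np.array([0.5, 0.5, 0.5, 0.0])   # V(s_0..s_2) plus the bootstrap value
new   = np.array([0, 0, 0, 1])           # a new episode starts right after step 2

adv, lastgaelam = np.zeros(3), 0.0
for t in reversed(range(3)):
    nonterminal = 1.0 - new[t + 1]
    delta = rew[t] + gamma * vpred[t + 1] * nonterminal - vpred[t]
    adv[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
tdlamret = adv + vpred[:3]
print(np.round(adv, 3), np.round(tdlamret, 3))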
baselines
baselines-master/baselines/gail/dataset/mujoco_dset.py
''' Data structure of the input .npz: the data is save in python dictionary format with keys: 'acs', 'ep_rets', 'rews', 'obs' the values of each item is a list storing the expert trajectory sequentially a transition can be: (data['obs'][t], data['acs'][t], data['obs'][t+1]) and get reward data['rews'][t] ''' from baselines import logger import numpy as np class Dset(object): def __init__(self, inputs, labels, randomize): self.inputs = inputs self.labels = labels assert len(self.inputs) == len(self.labels) self.randomize = randomize self.num_pairs = len(inputs) self.init_pointer() def init_pointer(self): self.pointer = 0 if self.randomize: idx = np.arange(self.num_pairs) np.random.shuffle(idx) self.inputs = self.inputs[idx, :] self.labels = self.labels[idx, :] def get_next_batch(self, batch_size): # if batch_size is negative -> return all if batch_size < 0: return self.inputs, self.labels if self.pointer + batch_size >= self.num_pairs: self.init_pointer() end = self.pointer + batch_size inputs = self.inputs[self.pointer:end, :] labels = self.labels[self.pointer:end, :] self.pointer = end return inputs, labels class Mujoco_Dset(object): def __init__(self, expert_path, train_fraction=0.7, traj_limitation=-1, randomize=True): traj_data = np.load(expert_path) if traj_limitation < 0: traj_limitation = len(traj_data['obs']) obs = traj_data['obs'][:traj_limitation] acs = traj_data['acs'][:traj_limitation] # obs, acs: shape (N, L, ) + S where N = # episodes, L = episode length # and S is the environment observation/action space. # Flatten to (N * L, prod(S)) if len(obs.shape) > 2: self.obs = np.reshape(obs, [-1, np.prod(obs.shape[2:])]) self.acs = np.reshape(acs, [-1, np.prod(acs.shape[2:])]) else: self.obs = np.vstack(obs) self.acs = np.vstack(acs) self.rets = traj_data['ep_rets'][:traj_limitation] self.avg_ret = sum(self.rets)/len(self.rets) self.std_ret = np.std(np.array(self.rets)) if len(self.acs) > 2: self.acs = np.squeeze(self.acs) assert len(self.obs) == len(self.acs) self.num_traj = min(traj_limitation, len(traj_data['obs'])) self.num_transition = len(self.obs) self.randomize = randomize self.dset = Dset(self.obs, self.acs, self.randomize) # for behavior cloning self.train_set = Dset(self.obs[:int(self.num_transition*train_fraction), :], self.acs[:int(self.num_transition*train_fraction), :], self.randomize) self.val_set = Dset(self.obs[int(self.num_transition*train_fraction):, :], self.acs[int(self.num_transition*train_fraction):, :], self.randomize) self.log_info() def log_info(self): logger.log("Total trajectories: %d" % self.num_traj) logger.log("Total transitions: %d" % self.num_transition) logger.log("Average returns: %f" % self.avg_ret) logger.log("Std for returns: %f" % self.std_ret) def get_next_batch(self, batch_size, split=None): if split is None: return self.dset.get_next_batch(batch_size) elif split == 'train': return self.train_set.get_next_batch(batch_size) elif split == 'val': return self.val_set.get_next_batch(batch_size) else: raise NotImplementedError def plot(self): import matplotlib.pyplot as plt plt.hist(self.rets) plt.savefig("histogram_rets.png") plt.close() def test(expert_path, traj_limitation, plot): dset = Mujoco_Dset(expert_path, traj_limitation=traj_limitation) if plot: dset.plot() if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument("--expert_path", type=str, default="../data/deterministic.trpo.Hopper.0.00.npz") parser.add_argument("--traj_limitation", type=int, default=None) parser.add_argument("--plot", type=bool, 
default=False) args = parser.parse_args() test(args.expert_path, args.traj_limitation, args.plot)
4,448
37.686957
104
py
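A quick shape-only illustration, using dummy arrays, of how Mujoco_Dset above flattens (N, L, ...) expert trajectories into single (N*L, dim) matrices before the train/validation split.

import numpy as np

N, L, obs_dim, act_dim = 4, 10, 3, 2          # hypothetical: 4 episodes of length 10
obs = np.zeros((N, L, obs_dim))
acs = np.zeros((N, L, act_dim))
flat_obs = np.reshape(obs, [-1, np.prod(obs.shape[2:])])
flat_acs = np.reshape(acs, [-1, np.prod(acs.shape[2:])])
print(flat_obs.shape, flat_acs.shape)          # (40, 3) (40, 2)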
baselines
baselines-master/baselines/gail/dataset/__init__.py
0
0
0
py
baselines
baselines-master/baselines/ddpg/ddpg.py
import os import time from collections import deque import pickle from baselines.ddpg.ddpg_learner import DDPG from baselines.ddpg.models import Actor, Critic from baselines.ddpg.memory import Memory from baselines.ddpg.noise import AdaptiveParamNoiseSpec, NormalActionNoise, OrnsteinUhlenbeckActionNoise from baselines.common import set_global_seeds import baselines.common.tf_util as U from baselines import logger import numpy as np try: from mpi4py import MPI except ImportError: MPI = None def learn(network, env, seed=None, total_timesteps=None, nb_epochs=None, # with default settings, perform 1M steps total nb_epoch_cycles=20, nb_rollout_steps=100, reward_scale=1.0, render=False, render_eval=False, noise_type='adaptive-param_0.2', normalize_returns=False, normalize_observations=True, critic_l2_reg=1e-2, actor_lr=1e-4, critic_lr=1e-3, popart=False, gamma=0.99, clip_norm=None, nb_train_steps=50, # per epoch cycle and MPI worker, nb_eval_steps=100, batch_size=64, # per MPI worker tau=0.01, eval_env=None, param_noise_adaption_interval=50, **network_kwargs): set_global_seeds(seed) if total_timesteps is not None: assert nb_epochs is None nb_epochs = int(total_timesteps) // (nb_epoch_cycles * nb_rollout_steps) else: nb_epochs = 500 if MPI is not None: rank = MPI.COMM_WORLD.Get_rank() else: rank = 0 nb_actions = env.action_space.shape[-1] assert (np.abs(env.action_space.low) == env.action_space.high).all() # we assume symmetric actions. memory = Memory(limit=int(1e6), action_shape=env.action_space.shape, observation_shape=env.observation_space.shape) critic = Critic(network=network, **network_kwargs) actor = Actor(nb_actions, network=network, **network_kwargs) action_noise = None param_noise = None if noise_type is not None: for current_noise_type in noise_type.split(','): current_noise_type = current_noise_type.strip() if current_noise_type == 'none': pass elif 'adaptive-param' in current_noise_type: _, stddev = current_noise_type.split('_') param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(stddev), desired_action_stddev=float(stddev)) elif 'normal' in current_noise_type: _, stddev = current_noise_type.split('_') action_noise = NormalActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions)) elif 'ou' in current_noise_type: _, stddev = current_noise_type.split('_') action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions)) else: raise RuntimeError('unknown noise type "{}"'.format(current_noise_type)) max_action = env.action_space.high logger.info('scaling actions by {} before executing in env'.format(max_action)) agent = DDPG(actor, critic, memory, env.observation_space.shape, env.action_space.shape, gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations, batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale) logger.info('Using agent with the following configuration:') logger.info(str(agent.__dict__.items())) eval_episode_rewards_history = deque(maxlen=100) episode_rewards_history = deque(maxlen=100) sess = U.get_session() # Prepare everything. 
agent.initialize(sess) sess.graph.finalize() agent.reset() obs = env.reset() if eval_env is not None: eval_obs = eval_env.reset() nenvs = obs.shape[0] episode_reward = np.zeros(nenvs, dtype = np.float32) #vector episode_step = np.zeros(nenvs, dtype = int) # vector episodes = 0 #scalar t = 0 # scalar epoch = 0 start_time = time.time() epoch_episode_rewards = [] epoch_episode_steps = [] epoch_actions = [] epoch_qs = [] epoch_episodes = 0 for epoch in range(nb_epochs): for cycle in range(nb_epoch_cycles): # Perform rollouts. if nenvs > 1: # if simulating multiple envs in parallel, impossible to reset agent at the end of the episode in each # of the environments, so resetting here instead agent.reset() for t_rollout in range(nb_rollout_steps): # Predict next action. action, q, _, _ = agent.step(obs, apply_noise=True, compute_Q=True) # Execute next action. if rank == 0 and render: env.render() # max_action is of dimension A, whereas action is dimension (nenvs, A) - the multiplication gets broadcasted to the batch new_obs, r, done, info = env.step(max_action * action) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1]) # note these outputs are batched from vecenv t += 1 if rank == 0 and render: env.render() episode_reward += r episode_step += 1 # Book-keeping. epoch_actions.append(action) epoch_qs.append(q) agent.store_transition(obs, action, r, new_obs, done) #the batched data will be unrolled in memory.py's append. obs = new_obs for d in range(len(done)): if done[d]: # Episode done. epoch_episode_rewards.append(episode_reward[d]) episode_rewards_history.append(episode_reward[d]) epoch_episode_steps.append(episode_step[d]) episode_reward[d] = 0. episode_step[d] = 0 epoch_episodes += 1 episodes += 1 if nenvs == 1: agent.reset() # Train. epoch_actor_losses = [] epoch_critic_losses = [] epoch_adaptive_distances = [] for t_train in range(nb_train_steps): # Adapt param noise, if necessary. if memory.nb_entries >= batch_size and t_train % param_noise_adaption_interval == 0: distance = agent.adapt_param_noise() epoch_adaptive_distances.append(distance) cl, al = agent.train() epoch_critic_losses.append(cl) epoch_actor_losses.append(al) agent.update_target_net() # Evaluate. eval_episode_rewards = [] eval_qs = [] if eval_env is not None: nenvs_eval = eval_obs.shape[0] eval_episode_reward = np.zeros(nenvs_eval, dtype = np.float32) for t_rollout in range(nb_eval_steps): eval_action, eval_q, _, _ = agent.step(eval_obs, apply_noise=False, compute_Q=True) eval_obs, eval_r, eval_done, eval_info = eval_env.step(max_action * eval_action) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1]) if render_eval: eval_env.render() eval_episode_reward += eval_r eval_qs.append(eval_q) for d in range(len(eval_done)): if eval_done[d]: eval_episode_rewards.append(eval_episode_reward[d]) eval_episode_rewards_history.append(eval_episode_reward[d]) eval_episode_reward[d] = 0.0 if MPI is not None: mpi_size = MPI.COMM_WORLD.Get_size() else: mpi_size = 1 # Log stats. 
# XXX shouldn't call np.mean on variable length lists duration = time.time() - start_time stats = agent.get_stats() combined_stats = stats.copy() combined_stats['rollout/return'] = np.mean(epoch_episode_rewards) combined_stats['rollout/return_std'] = np.std(epoch_episode_rewards) combined_stats['rollout/return_history'] = np.mean(episode_rewards_history) combined_stats['rollout/return_history_std'] = np.std(episode_rewards_history) combined_stats['rollout/episode_steps'] = np.mean(epoch_episode_steps) combined_stats['rollout/actions_mean'] = np.mean(epoch_actions) combined_stats['rollout/Q_mean'] = np.mean(epoch_qs) combined_stats['train/loss_actor'] = np.mean(epoch_actor_losses) combined_stats['train/loss_critic'] = np.mean(epoch_critic_losses) combined_stats['train/param_noise_distance'] = np.mean(epoch_adaptive_distances) combined_stats['total/duration'] = duration combined_stats['total/steps_per_second'] = float(t) / float(duration) combined_stats['total/episodes'] = episodes combined_stats['rollout/episodes'] = epoch_episodes combined_stats['rollout/actions_std'] = np.std(epoch_actions) # Evaluation statistics. if eval_env is not None: combined_stats['eval/return'] = eval_episode_rewards combined_stats['eval/return_history'] = np.mean(eval_episode_rewards_history) combined_stats['eval/Q'] = eval_qs combined_stats['eval/episodes'] = len(eval_episode_rewards) def as_scalar(x): if isinstance(x, np.ndarray): assert x.size == 1 return x[0] elif np.isscalar(x): return x else: raise ValueError('expected scalar, got %s'%x) combined_stats_sums = np.array([ np.array(x).flatten()[0] for x in combined_stats.values()]) if MPI is not None: combined_stats_sums = MPI.COMM_WORLD.allreduce(combined_stats_sums) combined_stats = {k : v / mpi_size for (k,v) in zip(combined_stats.keys(), combined_stats_sums)} # Total statistics. combined_stats['total/epochs'] = epoch + 1 combined_stats['total/steps'] = t for key in sorted(combined_stats.keys()): logger.record_tabular(key, combined_stats[key]) if rank == 0: logger.dump_tabular() logger.info('') logdir = logger.get_dir() if rank == 0 and logdir: if hasattr(env, 'get_state'): with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f: pickle.dump(env.get_state(), f) if eval_env and hasattr(eval_env, 'get_state'): with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as f: pickle.dump(eval_env.get_state(), f) return agent
11,283
39.884058
188
py
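A standalone sketch of the noise_type string convention that learn() above accepts: comma-separated entries of the form "<kind>_<stddev>". The parsing below is a simplified illustration, not the function's exact code path.

noise_type = 'adaptive-param_0.2,ou_0.1'       # hypothetical setting
for entry in noise_type.split(','):
    kind, stddev = entry.strip().rsplit('_', 1)
    print(kind, float(stddev))                 # adaptive-param 0.2 / ou 0.1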
baselines
baselines-master/baselines/ddpg/memory.py
import numpy as np


class RingBuffer(object):
    def __init__(self, maxlen, shape, dtype='float32'):
        self.maxlen = maxlen
        self.start = 0
        self.length = 0
        self.data = np.zeros((maxlen,) + shape).astype(dtype)

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        if idx < 0 or idx >= self.length:
            raise KeyError()
        return self.data[(self.start + idx) % self.maxlen]

    def get_batch(self, idxs):
        return self.data[(self.start + idxs) % self.maxlen]

    def append(self, v):
        if self.length < self.maxlen:
            # We have space, simply increase the length.
            self.length += 1
        elif self.length == self.maxlen:
            # No space, "remove" the first item.
            self.start = (self.start + 1) % self.maxlen
        else:
            # This should never happen.
            raise RuntimeError()
        self.data[(self.start + self.length - 1) % self.maxlen] = v


def array_min2d(x):
    x = np.array(x)
    if x.ndim >= 2:
        return x
    return x.reshape(-1, 1)


class Memory(object):
    def __init__(self, limit, action_shape, observation_shape):
        self.limit = limit

        self.observations0 = RingBuffer(limit, shape=observation_shape)
        self.actions = RingBuffer(limit, shape=action_shape)
        self.rewards = RingBuffer(limit, shape=(1,))
        self.terminals1 = RingBuffer(limit, shape=(1,))
        self.observations1 = RingBuffer(limit, shape=observation_shape)

    def sample(self, batch_size):
        # Draw such that each sampled transition always has a succeeding element.
        batch_idxs = np.random.randint(self.nb_entries - 2, size=batch_size)

        obs0_batch = self.observations0.get_batch(batch_idxs)
        obs1_batch = self.observations1.get_batch(batch_idxs)
        action_batch = self.actions.get_batch(batch_idxs)
        reward_batch = self.rewards.get_batch(batch_idxs)
        terminal1_batch = self.terminals1.get_batch(batch_idxs)

        result = {
            'obs0': array_min2d(obs0_batch),
            'obs1': array_min2d(obs1_batch),
            'rewards': array_min2d(reward_batch),
            'actions': array_min2d(action_batch),
            'terminals1': array_min2d(terminal1_batch),
        }
        return result

    def append(self, obs0, action, reward, obs1, terminal1, training=True):
        if not training:
            return
        self.observations0.append(obs0)
        self.actions.append(action)
        self.rewards.append(reward)
        self.observations1.append(obs1)
        self.terminals1.append(terminal1)

    @property
    def nb_entries(self):
        return len(self.observations0)
2708
31.25
76
py
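A short usage sketch for the replay memory above; the observation/action shapes, the toy transition data, and the batch size are illustrative.

import numpy as np
from baselines.ddpg.memory import Memory

obs_shape, action_shape = (3,), (1,)
memory = Memory(limit=1000, action_shape=action_shape, observation_shape=obs_shape)

# Fill the buffer with random transitions.
for _ in range(100):
    obs0 = np.random.randn(*obs_shape)
    action = np.random.uniform(-1.0, 1.0, size=action_shape)
    reward = float(np.random.randn())
    obs1 = np.random.randn(*obs_shape)
    memory.append(obs0, action, reward, obs1, terminal1=False)

batch = memory.sample(batch_size=32)
print(batch['obs0'].shape, batch['actions'].shape, batch['rewards'].shape)  # (32, 3) (32, 1) (32, 1)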
baselines
baselines-master/baselines/ddpg/ddpg_learner.py
from copy import copy from functools import reduce import numpy as np import tensorflow as tf import tensorflow.contrib as tc from baselines import logger from baselines.common.mpi_adam import MpiAdam import baselines.common.tf_util as U from baselines.common.mpi_running_mean_std import RunningMeanStd try: from mpi4py import MPI except ImportError: MPI = None def normalize(x, stats): if stats is None: return x return (x - stats.mean) / (stats.std + 1e-8) def denormalize(x, stats): if stats is None: return x return x * stats.std + stats.mean def reduce_std(x, axis=None, keepdims=False): return tf.sqrt(reduce_var(x, axis=axis, keepdims=keepdims)) def reduce_var(x, axis=None, keepdims=False): m = tf.reduce_mean(x, axis=axis, keepdims=True) devs_squared = tf.square(x - m) return tf.reduce_mean(devs_squared, axis=axis, keepdims=keepdims) def get_target_updates(vars, target_vars, tau): logger.info('setting up target updates ...') soft_updates = [] init_updates = [] assert len(vars) == len(target_vars) for var, target_var in zip(vars, target_vars): logger.info(' {} <- {}'.format(target_var.name, var.name)) init_updates.append(tf.assign(target_var, var)) soft_updates.append(tf.assign(target_var, (1. - tau) * target_var + tau * var)) assert len(init_updates) == len(vars) assert len(soft_updates) == len(vars) return tf.group(*init_updates), tf.group(*soft_updates) def get_perturbed_actor_updates(actor, perturbed_actor, param_noise_stddev): assert len(actor.vars) == len(perturbed_actor.vars) assert len(actor.perturbable_vars) == len(perturbed_actor.perturbable_vars) updates = [] for var, perturbed_var in zip(actor.vars, perturbed_actor.vars): if var in actor.perturbable_vars: logger.info(' {} <- {} + noise'.format(perturbed_var.name, var.name)) updates.append(tf.assign(perturbed_var, var + tf.random_normal(tf.shape(var), mean=0., stddev=param_noise_stddev))) else: logger.info(' {} <- {}'.format(perturbed_var.name, var.name)) updates.append(tf.assign(perturbed_var, var)) assert len(updates) == len(actor.vars) return tf.group(*updates) class DDPG(object): def __init__(self, actor, critic, memory, observation_shape, action_shape, param_noise=None, action_noise=None, gamma=0.99, tau=0.001, normalize_returns=False, enable_popart=False, normalize_observations=True, batch_size=128, observation_range=(-5., 5.), action_range=(-1., 1.), return_range=(-np.inf, np.inf), critic_l2_reg=0., actor_lr=1e-4, critic_lr=1e-3, clip_norm=None, reward_scale=1.): # Inputs. self.obs0 = tf.placeholder(tf.float32, shape=(None,) + observation_shape, name='obs0') self.obs1 = tf.placeholder(tf.float32, shape=(None,) + observation_shape, name='obs1') self.terminals1 = tf.placeholder(tf.float32, shape=(None, 1), name='terminals1') self.rewards = tf.placeholder(tf.float32, shape=(None, 1), name='rewards') self.actions = tf.placeholder(tf.float32, shape=(None,) + action_shape, name='actions') self.critic_target = tf.placeholder(tf.float32, shape=(None, 1), name='critic_target') self.param_noise_stddev = tf.placeholder(tf.float32, shape=(), name='param_noise_stddev') # Parameters. 
self.gamma = gamma self.tau = tau self.memory = memory self.normalize_observations = normalize_observations self.normalize_returns = normalize_returns self.action_noise = action_noise self.param_noise = param_noise self.action_range = action_range self.return_range = return_range self.observation_range = observation_range self.critic = critic self.actor = actor self.actor_lr = actor_lr self.critic_lr = critic_lr self.clip_norm = clip_norm self.enable_popart = enable_popart self.reward_scale = reward_scale self.batch_size = batch_size self.stats_sample = None self.critic_l2_reg = critic_l2_reg # Observation normalization. if self.normalize_observations: with tf.variable_scope('obs_rms'): self.obs_rms = RunningMeanStd(shape=observation_shape) else: self.obs_rms = None normalized_obs0 = tf.clip_by_value(normalize(self.obs0, self.obs_rms), self.observation_range[0], self.observation_range[1]) normalized_obs1 = tf.clip_by_value(normalize(self.obs1, self.obs_rms), self.observation_range[0], self.observation_range[1]) # Return normalization. if self.normalize_returns: with tf.variable_scope('ret_rms'): self.ret_rms = RunningMeanStd() else: self.ret_rms = None # Create target networks. target_actor = copy(actor) target_actor.name = 'target_actor' self.target_actor = target_actor target_critic = copy(critic) target_critic.name = 'target_critic' self.target_critic = target_critic # Create networks and core TF parts that are shared across setup parts. self.actor_tf = actor(normalized_obs0) self.normalized_critic_tf = critic(normalized_obs0, self.actions) self.critic_tf = denormalize(tf.clip_by_value(self.normalized_critic_tf, self.return_range[0], self.return_range[1]), self.ret_rms) self.normalized_critic_with_actor_tf = critic(normalized_obs0, self.actor_tf, reuse=True) self.critic_with_actor_tf = denormalize(tf.clip_by_value(self.normalized_critic_with_actor_tf, self.return_range[0], self.return_range[1]), self.ret_rms) Q_obs1 = denormalize(target_critic(normalized_obs1, target_actor(normalized_obs1)), self.ret_rms) self.target_Q = self.rewards + (1. - self.terminals1) * gamma * Q_obs1 # Set up parts. if self.param_noise is not None: self.setup_param_noise(normalized_obs0) self.setup_actor_optimizer() self.setup_critic_optimizer() if self.normalize_returns and self.enable_popart: self.setup_popart() self.setup_stats() self.setup_target_network_updates() self.initial_state = None # recurrent architectures not supported yet def setup_target_network_updates(self): actor_init_updates, actor_soft_updates = get_target_updates(self.actor.vars, self.target_actor.vars, self.tau) critic_init_updates, critic_soft_updates = get_target_updates(self.critic.vars, self.target_critic.vars, self.tau) self.target_init_updates = [actor_init_updates, critic_init_updates] self.target_soft_updates = [actor_soft_updates, critic_soft_updates] def setup_param_noise(self, normalized_obs0): assert self.param_noise is not None # Configure perturbed actor. param_noise_actor = copy(self.actor) param_noise_actor.name = 'param_noise_actor' self.perturbed_actor_tf = param_noise_actor(normalized_obs0) logger.info('setting up param noise') self.perturb_policy_ops = get_perturbed_actor_updates(self.actor, param_noise_actor, self.param_noise_stddev) # Configure separate copy for stddev adoption. 
adaptive_param_noise_actor = copy(self.actor) adaptive_param_noise_actor.name = 'adaptive_param_noise_actor' adaptive_actor_tf = adaptive_param_noise_actor(normalized_obs0) self.perturb_adaptive_policy_ops = get_perturbed_actor_updates(self.actor, adaptive_param_noise_actor, self.param_noise_stddev) self.adaptive_policy_distance = tf.sqrt(tf.reduce_mean(tf.square(self.actor_tf - adaptive_actor_tf))) def setup_actor_optimizer(self): logger.info('setting up actor optimizer') self.actor_loss = -tf.reduce_mean(self.critic_with_actor_tf) actor_shapes = [var.get_shape().as_list() for var in self.actor.trainable_vars] actor_nb_params = sum([reduce(lambda x, y: x * y, shape) for shape in actor_shapes]) logger.info(' actor shapes: {}'.format(actor_shapes)) logger.info(' actor params: {}'.format(actor_nb_params)) self.actor_grads = U.flatgrad(self.actor_loss, self.actor.trainable_vars, clip_norm=self.clip_norm) self.actor_optimizer = MpiAdam(var_list=self.actor.trainable_vars, beta1=0.9, beta2=0.999, epsilon=1e-08) def setup_critic_optimizer(self): logger.info('setting up critic optimizer') normalized_critic_target_tf = tf.clip_by_value(normalize(self.critic_target, self.ret_rms), self.return_range[0], self.return_range[1]) self.critic_loss = tf.reduce_mean(tf.square(self.normalized_critic_tf - normalized_critic_target_tf)) if self.critic_l2_reg > 0.: critic_reg_vars = [var for var in self.critic.trainable_vars if var.name.endswith('/w:0') and 'output' not in var.name] for var in critic_reg_vars: logger.info(' regularizing: {}'.format(var.name)) logger.info(' applying l2 regularization with {}'.format(self.critic_l2_reg)) critic_reg = tc.layers.apply_regularization( tc.layers.l2_regularizer(self.critic_l2_reg), weights_list=critic_reg_vars ) self.critic_loss += critic_reg critic_shapes = [var.get_shape().as_list() for var in self.critic.trainable_vars] critic_nb_params = sum([reduce(lambda x, y: x * y, shape) for shape in critic_shapes]) logger.info(' critic shapes: {}'.format(critic_shapes)) logger.info(' critic params: {}'.format(critic_nb_params)) self.critic_grads = U.flatgrad(self.critic_loss, self.critic.trainable_vars, clip_norm=self.clip_norm) self.critic_optimizer = MpiAdam(var_list=self.critic.trainable_vars, beta1=0.9, beta2=0.999, epsilon=1e-08) def setup_popart(self): # See https://arxiv.org/pdf/1602.07714.pdf for details. 
self.old_std = tf.placeholder(tf.float32, shape=[1], name='old_std') new_std = self.ret_rms.std self.old_mean = tf.placeholder(tf.float32, shape=[1], name='old_mean') new_mean = self.ret_rms.mean self.renormalize_Q_outputs_op = [] for vs in [self.critic.output_vars, self.target_critic.output_vars]: assert len(vs) == 2 M, b = vs assert 'kernel' in M.name assert 'bias' in b.name assert M.get_shape()[-1] == 1 assert b.get_shape()[-1] == 1 self.renormalize_Q_outputs_op += [M.assign(M * self.old_std / new_std)] self.renormalize_Q_outputs_op += [b.assign((b * self.old_std + self.old_mean - new_mean) / new_std)] def setup_stats(self): ops = [] names = [] if self.normalize_returns: ops += [self.ret_rms.mean, self.ret_rms.std] names += ['ret_rms_mean', 'ret_rms_std'] if self.normalize_observations: ops += [tf.reduce_mean(self.obs_rms.mean), tf.reduce_mean(self.obs_rms.std)] names += ['obs_rms_mean', 'obs_rms_std'] ops += [tf.reduce_mean(self.critic_tf)] names += ['reference_Q_mean'] ops += [reduce_std(self.critic_tf)] names += ['reference_Q_std'] ops += [tf.reduce_mean(self.critic_with_actor_tf)] names += ['reference_actor_Q_mean'] ops += [reduce_std(self.critic_with_actor_tf)] names += ['reference_actor_Q_std'] ops += [tf.reduce_mean(self.actor_tf)] names += ['reference_action_mean'] ops += [reduce_std(self.actor_tf)] names += ['reference_action_std'] if self.param_noise: ops += [tf.reduce_mean(self.perturbed_actor_tf)] names += ['reference_perturbed_action_mean'] ops += [reduce_std(self.perturbed_actor_tf)] names += ['reference_perturbed_action_std'] self.stats_ops = ops self.stats_names = names def step(self, obs, apply_noise=True, compute_Q=True): if self.param_noise is not None and apply_noise: actor_tf = self.perturbed_actor_tf else: actor_tf = self.actor_tf feed_dict = {self.obs0: U.adjust_shape(self.obs0, [obs])} if compute_Q: action, q = self.sess.run([actor_tf, self.critic_with_actor_tf], feed_dict=feed_dict) else: action = self.sess.run(actor_tf, feed_dict=feed_dict) q = None if self.action_noise is not None and apply_noise: noise = self.action_noise() assert noise.shape == action[0].shape action += noise action = np.clip(action, self.action_range[0], self.action_range[1]) return action, q, None, None def store_transition(self, obs0, action, reward, obs1, terminal1): reward *= self.reward_scale B = obs0.shape[0] for b in range(B): self.memory.append(obs0[b], action[b], reward[b], obs1[b], terminal1[b]) if self.normalize_observations: self.obs_rms.update(np.array([obs0[b]])) def train(self): # Get a batch. batch = self.memory.sample(batch_size=self.batch_size) if self.normalize_returns and self.enable_popart: old_mean, old_std, target_Q = self.sess.run([self.ret_rms.mean, self.ret_rms.std, self.target_Q], feed_dict={ self.obs1: batch['obs1'], self.rewards: batch['rewards'], self.terminals1: batch['terminals1'].astype('float32'), }) self.ret_rms.update(target_Q.flatten()) self.sess.run(self.renormalize_Q_outputs_op, feed_dict={ self.old_std : np.array([old_std]), self.old_mean : np.array([old_mean]), }) # Run sanity check. Disabled by default since it slows down things considerably. 
# print('running sanity check') # target_Q_new, new_mean, new_std = self.sess.run([self.target_Q, self.ret_rms.mean, self.ret_rms.std], feed_dict={ # self.obs1: batch['obs1'], # self.rewards: batch['rewards'], # self.terminals1: batch['terminals1'].astype('float32'), # }) # print(target_Q_new, target_Q, new_mean, new_std) # assert (np.abs(target_Q - target_Q_new) < 1e-3).all() else: target_Q = self.sess.run(self.target_Q, feed_dict={ self.obs1: batch['obs1'], self.rewards: batch['rewards'], self.terminals1: batch['terminals1'].astype('float32'), }) # Get all gradients and perform a synced update. ops = [self.actor_grads, self.actor_loss, self.critic_grads, self.critic_loss] actor_grads, actor_loss, critic_grads, critic_loss = self.sess.run(ops, feed_dict={ self.obs0: batch['obs0'], self.actions: batch['actions'], self.critic_target: target_Q, }) self.actor_optimizer.update(actor_grads, stepsize=self.actor_lr) self.critic_optimizer.update(critic_grads, stepsize=self.critic_lr) return critic_loss, actor_loss def initialize(self, sess): self.sess = sess self.sess.run(tf.global_variables_initializer()) self.actor_optimizer.sync() self.critic_optimizer.sync() self.sess.run(self.target_init_updates) def update_target_net(self): self.sess.run(self.target_soft_updates) def get_stats(self): if self.stats_sample is None: # Get a sample and keep that fixed for all further computations. # This allows us to estimate the change in value for the same set of inputs. self.stats_sample = self.memory.sample(batch_size=self.batch_size) values = self.sess.run(self.stats_ops, feed_dict={ self.obs0: self.stats_sample['obs0'], self.actions: self.stats_sample['actions'], }) names = self.stats_names[:] assert len(names) == len(values) stats = dict(zip(names, values)) if self.param_noise is not None: stats = {**stats, **self.param_noise.get_stats()} return stats def adapt_param_noise(self): try: from mpi4py import MPI except ImportError: MPI = None if self.param_noise is None: return 0. # Perturb a separate copy of the policy to adjust the scale for the next "real" perturbation. batch = self.memory.sample(batch_size=self.batch_size) self.sess.run(self.perturb_adaptive_policy_ops, feed_dict={ self.param_noise_stddev: self.param_noise.current_stddev, }) distance = self.sess.run(self.adaptive_policy_distance, feed_dict={ self.obs0: batch['obs0'], self.param_noise_stddev: self.param_noise.current_stddev, }) if MPI is not None: mean_distance = MPI.COMM_WORLD.allreduce(distance, op=MPI.SUM) / MPI.COMM_WORLD.Get_size() else: mean_distance = distance self.param_noise.adapt(mean_distance) return mean_distance def reset(self): # Reset internal state after an episode is complete. if self.action_noise is not None: self.action_noise.reset() if self.param_noise is not None: self.sess.run(self.perturb_policy_ops, feed_dict={ self.param_noise_stddev: self.param_noise.current_stddev, })
17731
43.664987
161
py
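A plain-numpy illustration (not the TF code itself) of the Polyak soft update that get_target_updates above builds as assign ops; the parameter values and the tau used here are made up.

import numpy as np

def soft_update(target_params, source_params, tau=0.001):
    # target <- (1 - tau) * target + tau * source, applied parameter by parameter.
    return [(1.0 - tau) * t + tau * s for t, s in zip(target_params, source_params)]

target = [np.zeros(4)]
source = [np.ones(4)]
for _ in range(5):
    target = soft_update(target, source, tau=0.5)
print(target[0])  # moves monotonically toward the source parameters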
baselines
baselines-master/baselines/ddpg/noise.py
import numpy as np class AdaptiveParamNoiseSpec(object): def __init__(self, initial_stddev=0.1, desired_action_stddev=0.1, adoption_coefficient=1.01): self.initial_stddev = initial_stddev self.desired_action_stddev = desired_action_stddev self.adoption_coefficient = adoption_coefficient self.current_stddev = initial_stddev def adapt(self, distance): if distance > self.desired_action_stddev: # Decrease stddev. self.current_stddev /= self.adoption_coefficient else: # Increase stddev. self.current_stddev *= self.adoption_coefficient def get_stats(self): stats = { 'param_noise_stddev': self.current_stddev, } return stats def __repr__(self): fmt = 'AdaptiveParamNoiseSpec(initial_stddev={}, desired_action_stddev={}, adoption_coefficient={})' return fmt.format(self.initial_stddev, self.desired_action_stddev, self.adoption_coefficient) class ActionNoise(object): def reset(self): pass class NormalActionNoise(ActionNoise): def __init__(self, mu, sigma): self.mu = mu self.sigma = sigma def __call__(self): return np.random.normal(self.mu, self.sigma) def __repr__(self): return 'NormalActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma) # Based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab class OrnsteinUhlenbeckActionNoise(ActionNoise): def __init__(self, mu, sigma, theta=.15, dt=1e-2, x0=None): self.theta = theta self.mu = mu self.sigma = sigma self.dt = dt self.x0 = x0 self.reset() def __call__(self): x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape) self.x_prev = x return x def reset(self): self.x_prev = self.x0 if self.x0 is not None else np.zeros_like(self.mu) def __repr__(self): return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)
2162
30.808824
143
py
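A usage sketch for the exploration-noise classes above; the action dimension and noise scales are arbitrary.

import numpy as np
from baselines.ddpg.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise

nb_actions = 2
ou_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(nb_actions), sigma=0.2 * np.ones(nb_actions))
gaussian_noise = NormalActionNoise(mu=np.zeros(nb_actions), sigma=0.1 * np.ones(nb_actions))

ou_noise.reset()  # reset the OU state at the start of each episode
for _ in range(3):
    print(ou_noise(), gaussian_noise())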
baselines
baselines-master/baselines/ddpg/test_smoke.py
from baselines.common.tests.util import smoketest def _run(argstr): smoketest('--alg=ddpg --env=Pendulum-v0 --num_timesteps=0 ' + argstr) def test_popart(): _run('--normalize_returns=True --popart=True') def test_noise_normal(): _run('--noise_type=normal_0.1') def test_noise_ou(): _run('--noise_type=ou_0.1') def test_noise_adaptive(): _run('--noise_type=adaptive-param_0.2,normal_0.1')
413
23.352941
73
py
baselines
baselines-master/baselines/ddpg/models.py
import tensorflow as tf from baselines.common.models import get_network_builder class Model(object): def __init__(self, name, network='mlp', **network_kwargs): self.name = name self.network_builder = get_network_builder(network)(**network_kwargs) @property def vars(self): return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name) @property def trainable_vars(self): return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name) @property def perturbable_vars(self): return [var for var in self.trainable_vars if 'LayerNorm' not in var.name] class Actor(Model): def __init__(self, nb_actions, name='actor', network='mlp', **network_kwargs): super().__init__(name=name, network=network, **network_kwargs) self.nb_actions = nb_actions def __call__(self, obs, reuse=False): with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE): x = self.network_builder(obs) x = tf.layers.dense(x, self.nb_actions, kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)) x = tf.nn.tanh(x) return x class Critic(Model): def __init__(self, name='critic', network='mlp', **network_kwargs): super().__init__(name=name, network=network, **network_kwargs) self.layer_norm = True def __call__(self, obs, action, reuse=False): with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE): x = tf.concat([obs, action], axis=-1) # this assumes observation and action can be concatenated x = self.network_builder(x) x = tf.layers.dense(x, 1, kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3), name='output') return x @property def output_vars(self): output_vars = [var for var in self.trainable_vars if 'output' in var.name] return output_vars
1941
36.346154
129
py
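A graph-construction sketch showing how the Actor and Critic above can be wired to placeholders outside the DDPG class; the observation/action sizes, placeholder names, and the TF1 session usage are illustrative.

import numpy as np
import tensorflow as tf
from baselines.ddpg.models import Actor, Critic

obs_ph = tf.placeholder(tf.float32, shape=(None, 3), name='obs')
act_ph = tf.placeholder(tf.float32, shape=(None, 1), name='act')

actor = Actor(nb_actions=1, network='mlp')
critic = Critic(network='mlp')

pi = actor(obs_ph)          # tanh-squashed actions in (-1, 1)
q = critic(obs_ph, act_ph)  # one Q-value per (observation, action) pair

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    obs = np.random.randn(5, 3).astype(np.float32)
    acts = np.random.uniform(-1.0, 1.0, size=(5, 1)).astype(np.float32)
    print(sess.run(pi, {obs_ph: obs}).shape, sess.run(q, {obs_ph: obs, act_ph: acts}).shape)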
baselines
baselines-master/baselines/ddpg/__init__.py
0
0
0
py
baselines
baselines-master/baselines/acktr/acktr.py
import os.path as osp import time import functools import tensorflow as tf from baselines import logger from baselines.common import set_global_seeds, explained_variance from baselines.common.policies import build_policy from baselines.common.tf_util import get_session, save_variables, load_variables from baselines.a2c.runner import Runner from baselines.a2c.utils import Scheduler, find_trainable_variables from baselines.acktr import kfac from baselines.ppo2.ppo2 import safemean from collections import deque class Model(object): def __init__(self, policy, ob_space, ac_space, nenvs,total_timesteps, nprocs=32, nsteps=20, ent_coef=0.01, vf_coef=0.5, vf_fisher_coef=1.0, lr=0.25, max_grad_norm=0.5, kfac_clip=0.001, lrschedule='linear', is_async=True): self.sess = sess = get_session() nbatch = nenvs * nsteps with tf.variable_scope('acktr_model', reuse=tf.AUTO_REUSE): self.model = step_model = policy(nenvs, 1, sess=sess) self.model2 = train_model = policy(nenvs*nsteps, nsteps, sess=sess) A = train_model.pdtype.sample_placeholder([None]) ADV = tf.placeholder(tf.float32, [nbatch]) R = tf.placeholder(tf.float32, [nbatch]) PG_LR = tf.placeholder(tf.float32, []) VF_LR = tf.placeholder(tf.float32, []) neglogpac = train_model.pd.neglogp(A) self.logits = train_model.pi ##training loss pg_loss = tf.reduce_mean(ADV*neglogpac) entropy = tf.reduce_mean(train_model.pd.entropy()) pg_loss = pg_loss - ent_coef * entropy vf_loss = tf.losses.mean_squared_error(tf.squeeze(train_model.vf), R) train_loss = pg_loss + vf_coef * vf_loss ##Fisher loss construction self.pg_fisher = pg_fisher_loss = -tf.reduce_mean(neglogpac) sample_net = train_model.vf + tf.random_normal(tf.shape(train_model.vf)) self.vf_fisher = vf_fisher_loss = - vf_fisher_coef*tf.reduce_mean(tf.pow(train_model.vf - tf.stop_gradient(sample_net), 2)) self.joint_fisher = joint_fisher_loss = pg_fisher_loss + vf_fisher_loss self.params=params = find_trainable_variables("acktr_model") self.grads_check = grads = tf.gradients(train_loss,params) with tf.device('/gpu:0'): self.optim = optim = kfac.KfacOptimizer(learning_rate=PG_LR, clip_kl=kfac_clip,\ momentum=0.9, kfac_update=1, epsilon=0.01,\ stats_decay=0.99, is_async=is_async, cold_iter=10, max_grad_norm=max_grad_norm) # update_stats_op = optim.compute_and_apply_stats(joint_fisher_loss, var_list=params) optim.compute_and_apply_stats(joint_fisher_loss, var_list=params) train_op, q_runner = optim.apply_gradients(list(zip(grads,params))) self.q_runner = q_runner self.lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule) def train(obs, states, rewards, masks, actions, values): advs = rewards - values for step in range(len(obs)): cur_lr = self.lr.value() td_map = {train_model.X:obs, A:actions, ADV:advs, R:rewards, PG_LR:cur_lr, VF_LR:cur_lr} if states is not None: td_map[train_model.S] = states td_map[train_model.M] = masks policy_loss, value_loss, policy_entropy, _ = sess.run( [pg_loss, vf_loss, entropy, train_op], td_map ) return policy_loss, value_loss, policy_entropy self.train = train self.save = functools.partial(save_variables, sess=sess) self.load = functools.partial(load_variables, sess=sess) self.train_model = train_model self.step_model = step_model self.step = step_model.step self.value = step_model.value self.initial_state = step_model.initial_state tf.global_variables_initializer().run(session=sess) def learn(network, env, seed, total_timesteps=int(40e6), gamma=0.99, log_interval=100, nprocs=32, nsteps=20, ent_coef=0.01, vf_coef=0.5, vf_fisher_coef=1.0, lr=0.25, max_grad_norm=0.5, 
kfac_clip=0.001, save_interval=None, lrschedule='linear', load_path=None, is_async=True, **network_kwargs): set_global_seeds(seed) if network == 'cnn': network_kwargs['one_dim_bias'] = True policy = build_policy(env, network, **network_kwargs) nenvs = env.num_envs ob_space = env.observation_space ac_space = env.action_space make_model = lambda : Model(policy, ob_space, ac_space, nenvs, total_timesteps, nprocs=nprocs, nsteps =nsteps, ent_coef=ent_coef, vf_coef=vf_coef, vf_fisher_coef= vf_fisher_coef, lr=lr, max_grad_norm=max_grad_norm, kfac_clip=kfac_clip, lrschedule=lrschedule, is_async=is_async) if save_interval and logger.get_dir(): import cloudpickle with open(osp.join(logger.get_dir(), 'make_model.pkl'), 'wb') as fh: fh.write(cloudpickle.dumps(make_model)) model = make_model() if load_path is not None: model.load(load_path) runner = Runner(env, model, nsteps=nsteps, gamma=gamma) epinfobuf = deque(maxlen=100) nbatch = nenvs*nsteps tstart = time.time() coord = tf.train.Coordinator() if is_async: enqueue_threads = model.q_runner.create_threads(model.sess, coord=coord, start=True) else: enqueue_threads = [] for update in range(1, total_timesteps//nbatch+1): obs, states, rewards, masks, actions, values, epinfos = runner.run() epinfobuf.extend(epinfos) policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values) model.old_obs = obs nseconds = time.time()-tstart fps = int((update*nbatch)/nseconds) if update % log_interval == 0 or update == 1: ev = explained_variance(values, rewards) logger.record_tabular("nupdates", update) logger.record_tabular("total_timesteps", update*nbatch) logger.record_tabular("fps", fps) logger.record_tabular("policy_entropy", float(policy_entropy)) logger.record_tabular("policy_loss", float(policy_loss)) logger.record_tabular("value_loss", float(value_loss)) logger.record_tabular("explained_variance", float(ev)) logger.record_tabular("eprewmean", safemean([epinfo['r'] for epinfo in epinfobuf])) logger.record_tabular("eplenmean", safemean([epinfo['l'] for epinfo in epinfobuf])) logger.dump_tabular() if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir(): savepath = osp.join(logger.get_dir(), 'checkpoint%.5i'%update) print('Saving to', savepath) model.save(savepath) coord.request_stop() coord.join(enqueue_threads) return model
7037
43.264151
131
py
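A small sketch of the learning-rate annealing the Model above relies on: the Scheduler constructor arguments mirror the call in acktr.py, while the concrete numbers here are arbitrary.

from baselines.a2c.utils import Scheduler

lr_schedule = Scheduler(v=0.25, nvalues=1000, schedule='linear')
lrs = [lr_schedule.value() for _ in range(1000)]
print(lrs[0], lrs[500], lrs[-1])  # anneals from 0.25 toward 0 over nvalues calls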
baselines
baselines-master/baselines/acktr/kfac.py
import tensorflow as tf import numpy as np import re # flake8: noqa F403, F405 from baselines.acktr.kfac_utils import * from functools import reduce KFAC_OPS = ['MatMul', 'Conv2D', 'BiasAdd'] KFAC_DEBUG = False class KfacOptimizer(): # note that KfacOptimizer will be truly synchronous (and thus deterministic) only if a single-threaded session is used def __init__(self, learning_rate=0.01, momentum=0.9, clip_kl=0.01, kfac_update=2, stats_accum_iter=60, full_stats_init=False, cold_iter=100, cold_lr=None, is_async=False, async_stats=False, epsilon=1e-2, stats_decay=0.95, blockdiag_bias=False, channel_fac=False, factored_damping=False, approxT2=False, use_float64=False, weight_decay_dict={},max_grad_norm=0.5): self.max_grad_norm = max_grad_norm self._lr = learning_rate self._momentum = momentum self._clip_kl = clip_kl self._channel_fac = channel_fac self._kfac_update = kfac_update self._async = is_async self._async_stats = async_stats self._epsilon = epsilon self._stats_decay = stats_decay self._blockdiag_bias = blockdiag_bias self._approxT2 = approxT2 self._use_float64 = use_float64 self._factored_damping = factored_damping self._cold_iter = cold_iter if cold_lr == None: # good heuristics self._cold_lr = self._lr# * 3. else: self._cold_lr = cold_lr self._stats_accum_iter = stats_accum_iter self._weight_decay_dict = weight_decay_dict self._diag_init_coeff = 0. self._full_stats_init = full_stats_init if not self._full_stats_init: self._stats_accum_iter = self._cold_iter self.sgd_step = tf.Variable(0, name='KFAC/sgd_step', trainable=False) self.global_step = tf.Variable( 0, name='KFAC/global_step', trainable=False) self.cold_step = tf.Variable(0, name='KFAC/cold_step', trainable=False) self.factor_step = tf.Variable( 0, name='KFAC/factor_step', trainable=False) self.stats_step = tf.Variable( 0, name='KFAC/stats_step', trainable=False) self.vFv = tf.Variable(0., name='KFAC/vFv', trainable=False) self.factors = {} self.param_vars = [] self.stats = {} self.stats_eigen = {} def getFactors(self, g, varlist): graph = tf.get_default_graph() factorTensors = {} fpropTensors = [] bpropTensors = [] opTypes = [] fops = [] def searchFactors(gradient, graph): # hard coded search stratergy bpropOp = gradient.op bpropOp_name = bpropOp.name bTensors = [] fTensors = [] # combining additive gradient, assume they are the same op type and # indepedent if 'AddN' in bpropOp_name: factors = [] for g in gradient.op.inputs: factors.append(searchFactors(g, graph)) op_names = [item['opName'] for item in factors] # TO-DO: need to check all the attribute of the ops as well print (gradient.name) print (op_names) print (len(np.unique(op_names))) assert len(np.unique(op_names)) == 1, gradient.name + \ ' is shared among different computation OPs' bTensors = reduce(lambda x, y: x + y, [item['bpropFactors'] for item in factors]) if len(factors[0]['fpropFactors']) > 0: fTensors = reduce( lambda x, y: x + y, [item['fpropFactors'] for item in factors]) fpropOp_name = op_names[0] fpropOp = factors[0]['op'] else: fpropOp_name = re.search( 'gradientsSampled(_[0-9]+|)/(.+?)_grad', bpropOp_name).group(2) fpropOp = graph.get_operation_by_name(fpropOp_name) if fpropOp.op_def.name in KFAC_OPS: # Known OPs ### bTensor = [ i for i in bpropOp.inputs if 'gradientsSampled' in i.name][-1] bTensorShape = fpropOp.outputs[0].get_shape() if bTensor.get_shape()[0].value == None: bTensor.set_shape(bTensorShape) bTensors.append(bTensor) ### if fpropOp.op_def.name == 'BiasAdd': fTensors = [] else: fTensors.append( [i for i in fpropOp.inputs if 
param.op.name not in i.name][0]) fpropOp_name = fpropOp.op_def.name else: # unknown OPs, block approximation used bInputsList = [i for i in bpropOp.inputs[ 0].op.inputs if 'gradientsSampled' in i.name if 'Shape' not in i.name] if len(bInputsList) > 0: bTensor = bInputsList[0] bTensorShape = fpropOp.outputs[0].get_shape() if len(bTensor.get_shape()) > 0 and bTensor.get_shape()[0].value == None: bTensor.set_shape(bTensorShape) bTensors.append(bTensor) fpropOp_name = opTypes.append('UNK-' + fpropOp.op_def.name) return {'opName': fpropOp_name, 'op': fpropOp, 'fpropFactors': fTensors, 'bpropFactors': bTensors} for t, param in zip(g, varlist): if KFAC_DEBUG: print(('get factor for '+param.name)) factors = searchFactors(t, graph) factorTensors[param] = factors ######## # check associated weights and bias for homogeneous coordinate representation # and check redundent factors # TO-DO: there may be a bug to detect associate bias and weights for # forking layer, e.g. in inception models. for param in varlist: factorTensors[param]['assnWeights'] = None factorTensors[param]['assnBias'] = None for param in varlist: if factorTensors[param]['opName'] == 'BiasAdd': factorTensors[param]['assnWeights'] = None for item in varlist: if len(factorTensors[item]['bpropFactors']) > 0: if (set(factorTensors[item]['bpropFactors']) == set(factorTensors[param]['bpropFactors'])) and (len(factorTensors[item]['fpropFactors']) > 0): factorTensors[param]['assnWeights'] = item factorTensors[item]['assnBias'] = param factorTensors[param]['bpropFactors'] = factorTensors[ item]['bpropFactors'] ######## ######## # concatenate the additive gradients along the batch dimension, i.e. # assuming independence structure for key in ['fpropFactors', 'bpropFactors']: for i, param in enumerate(varlist): if len(factorTensors[param][key]) > 0: if (key + '_concat') not in factorTensors[param]: name_scope = factorTensors[param][key][0].name.split(':')[ 0] with tf.name_scope(name_scope): factorTensors[param][ key + '_concat'] = tf.concat(factorTensors[param][key], 0) else: factorTensors[param][key + '_concat'] = None for j, param2 in enumerate(varlist[(i + 1):]): if (len(factorTensors[param][key]) > 0) and (set(factorTensors[param2][key]) == set(factorTensors[param][key])): factorTensors[param2][key] = factorTensors[param][key] factorTensors[param2][ key + '_concat'] = factorTensors[param][key + '_concat'] ######## if KFAC_DEBUG: for items in zip(varlist, fpropTensors, bpropTensors, opTypes): print((items[0].name, factorTensors[item])) self.factors = factorTensors return factorTensors def getStats(self, factors, varlist): if len(self.stats) == 0: # initialize stats variables on CPU because eigen decomp is # computed on CPU with tf.device('/cpu'): tmpStatsCache = {} # search for tensor factors and # use block diag approx for the bias units for var in varlist: fpropFactor = factors[var]['fpropFactors_concat'] bpropFactor = factors[var]['bpropFactors_concat'] opType = factors[var]['opName'] if opType == 'Conv2D': Kh = var.get_shape()[0] Kw = var.get_shape()[1] C = fpropFactor.get_shape()[-1] Oh = bpropFactor.get_shape()[1] Ow = bpropFactor.get_shape()[2] if Oh == 1 and Ow == 1 and self._channel_fac: # factorization along the channels do not support # homogeneous coordinate var_assnBias = factors[var]['assnBias'] if var_assnBias: factors[var]['assnBias'] = None factors[var_assnBias]['assnWeights'] = None ## for var in varlist: fpropFactor = factors[var]['fpropFactors_concat'] bpropFactor = factors[var]['bpropFactors_concat'] opType = 
factors[var]['opName'] self.stats[var] = {'opName': opType, 'fprop_concat_stats': [], 'bprop_concat_stats': [], 'assnWeights': factors[var]['assnWeights'], 'assnBias': factors[var]['assnBias'], } if fpropFactor is not None: if fpropFactor not in tmpStatsCache: if opType == 'Conv2D': Kh = var.get_shape()[0] Kw = var.get_shape()[1] C = fpropFactor.get_shape()[-1] Oh = bpropFactor.get_shape()[1] Ow = bpropFactor.get_shape()[2] if Oh == 1 and Ow == 1 and self._channel_fac: # factorization along the channels # assume independence between input channels and spatial # 2K-1 x 2K-1 covariance matrix and C x C covariance matrix # factorization along the channels do not # support homogeneous coordinate, assnBias # is always None fpropFactor2_size = Kh * Kw slot_fpropFactor_stats2 = tf.Variable(tf.diag(tf.ones( [fpropFactor2_size])) * self._diag_init_coeff, name='KFAC_STATS/' + fpropFactor.op.name, trainable=False) self.stats[var]['fprop_concat_stats'].append( slot_fpropFactor_stats2) fpropFactor_size = C else: # 2K-1 x 2K-1 x C x C covariance matrix # assume BHWC fpropFactor_size = Kh * Kw * C else: # D x D covariance matrix fpropFactor_size = fpropFactor.get_shape()[-1] # use homogeneous coordinate if not self._blockdiag_bias and self.stats[var]['assnBias']: fpropFactor_size += 1 slot_fpropFactor_stats = tf.Variable(tf.diag(tf.ones( [fpropFactor_size])) * self._diag_init_coeff, name='KFAC_STATS/' + fpropFactor.op.name, trainable=False) self.stats[var]['fprop_concat_stats'].append( slot_fpropFactor_stats) if opType != 'Conv2D': tmpStatsCache[fpropFactor] = self.stats[ var]['fprop_concat_stats'] else: self.stats[var][ 'fprop_concat_stats'] = tmpStatsCache[fpropFactor] if bpropFactor is not None: # no need to collect backward stats for bias vectors if # using homogeneous coordinates if not((not self._blockdiag_bias) and self.stats[var]['assnWeights']): if bpropFactor not in tmpStatsCache: slot_bpropFactor_stats = tf.Variable(tf.diag(tf.ones([bpropFactor.get_shape( )[-1]])) * self._diag_init_coeff, name='KFAC_STATS/' + bpropFactor.op.name, trainable=False) self.stats[var]['bprop_concat_stats'].append( slot_bpropFactor_stats) tmpStatsCache[bpropFactor] = self.stats[ var]['bprop_concat_stats'] else: self.stats[var][ 'bprop_concat_stats'] = tmpStatsCache[bpropFactor] return self.stats def compute_and_apply_stats(self, loss_sampled, var_list=None): varlist = var_list if varlist is None: varlist = tf.trainable_variables() stats = self.compute_stats(loss_sampled, var_list=varlist) return self.apply_stats(stats) def compute_stats(self, loss_sampled, var_list=None): varlist = var_list if varlist is None: varlist = tf.trainable_variables() gs = tf.gradients(loss_sampled, varlist, name='gradientsSampled') self.gs = gs factors = self.getFactors(gs, varlist) stats = self.getStats(factors, varlist) updateOps = [] statsUpdates = {} statsUpdates_cache = {} for var in varlist: opType = factors[var]['opName'] fops = factors[var]['op'] fpropFactor = factors[var]['fpropFactors_concat'] fpropStats_vars = stats[var]['fprop_concat_stats'] bpropFactor = factors[var]['bpropFactors_concat'] bpropStats_vars = stats[var]['bprop_concat_stats'] SVD_factors = {} for stats_var in fpropStats_vars: stats_var_dim = int(stats_var.get_shape()[0]) if stats_var not in statsUpdates_cache: old_fpropFactor = fpropFactor B = (tf.shape(fpropFactor)[0]) # batch size if opType == 'Conv2D': strides = fops.get_attr("strides") padding = fops.get_attr("padding") convkernel_size = var.get_shape()[0:3] KH = int(convkernel_size[0]) KW = 
int(convkernel_size[1]) C = int(convkernel_size[2]) flatten_size = int(KH * KW * C) Oh = int(bpropFactor.get_shape()[1]) Ow = int(bpropFactor.get_shape()[2]) if Oh == 1 and Ow == 1 and self._channel_fac: # factorization along the channels # assume independence among input channels # factor = B x 1 x 1 x (KH xKW x C) # patches = B x Oh x Ow x (KH xKW x C) if len(SVD_factors) == 0: if KFAC_DEBUG: print(('approx %s act factor with rank-1 SVD factors' % (var.name))) # find closest rank-1 approx to the feature map S, U, V = tf.batch_svd(tf.reshape( fpropFactor, [-1, KH * KW, C])) # get rank-1 approx slides sqrtS1 = tf.expand_dims(tf.sqrt(S[:, 0, 0]), 1) patches_k = U[:, :, 0] * sqrtS1 # B x KH*KW full_factor_shape = fpropFactor.get_shape() patches_k.set_shape( [full_factor_shape[0], KH * KW]) patches_c = V[:, :, 0] * sqrtS1 # B x C patches_c.set_shape([full_factor_shape[0], C]) SVD_factors[C] = patches_c SVD_factors[KH * KW] = patches_k fpropFactor = SVD_factors[stats_var_dim] else: # poor mem usage implementation patches = tf.extract_image_patches(fpropFactor, ksizes=[1, convkernel_size[ 0], convkernel_size[1], 1], strides=strides, rates=[1, 1, 1, 1], padding=padding) if self._approxT2: if KFAC_DEBUG: print(('approxT2 act fisher for %s' % (var.name))) # T^2 terms * 1/T^2, size: B x C fpropFactor = tf.reduce_mean(patches, [1, 2]) else: # size: (B x Oh x Ow) x C fpropFactor = tf.reshape( patches, [-1, flatten_size]) / Oh / Ow fpropFactor_size = int(fpropFactor.get_shape()[-1]) if stats_var_dim == (fpropFactor_size + 1) and not self._blockdiag_bias: if opType == 'Conv2D' and not self._approxT2: # correct padding for numerical stability (we # divided out OhxOw from activations for T1 approx) fpropFactor = tf.concat([fpropFactor, tf.ones( [tf.shape(fpropFactor)[0], 1]) / Oh / Ow], 1) else: # use homogeneous coordinates fpropFactor = tf.concat( [fpropFactor, tf.ones([tf.shape(fpropFactor)[0], 1])], 1) # average over the number of data points in a batch # divided by B cov = tf.matmul(fpropFactor, fpropFactor, transpose_a=True) / tf.cast(B, tf.float32) updateOps.append(cov) statsUpdates[stats_var] = cov if opType != 'Conv2D': # HACK: for convolution we recompute fprop stats for # every layer including forking layers statsUpdates_cache[stats_var] = cov for stats_var in bpropStats_vars: stats_var_dim = int(stats_var.get_shape()[0]) if stats_var not in statsUpdates_cache: old_bpropFactor = bpropFactor bpropFactor_shape = bpropFactor.get_shape() B = tf.shape(bpropFactor)[0] # batch size C = int(bpropFactor_shape[-1]) # num channels if opType == 'Conv2D' or len(bpropFactor_shape) == 4: if fpropFactor is not None: if self._approxT2: if KFAC_DEBUG: print(('approxT2 grad fisher for %s' % (var.name))) bpropFactor = tf.reduce_sum( bpropFactor, [1, 2]) # T^2 terms * 1/T^2 else: bpropFactor = tf.reshape( bpropFactor, [-1, C]) * Oh * Ow # T * 1/T terms else: # just doing block diag approx. spatial independent # structure does not apply here. summing over # spatial locations if KFAC_DEBUG: print(('block diag approx fisher for %s' % (var.name))) bpropFactor = tf.reduce_sum(bpropFactor, [1, 2]) # assume sampled loss is averaged. 
TO-DO:figure out better # way to handle this bpropFactor *= tf.to_float(B) ## cov_b = tf.matmul( bpropFactor, bpropFactor, transpose_a=True) / tf.to_float(tf.shape(bpropFactor)[0]) updateOps.append(cov_b) statsUpdates[stats_var] = cov_b statsUpdates_cache[stats_var] = cov_b if KFAC_DEBUG: aKey = list(statsUpdates.keys())[0] statsUpdates[aKey] = tf.Print(statsUpdates[aKey], [tf.convert_to_tensor('step:'), self.global_step, tf.convert_to_tensor( 'computing stats'), ]) self.statsUpdates = statsUpdates return statsUpdates def apply_stats(self, statsUpdates): """ compute stats and update/apply the new stats to the running average """ def updateAccumStats(): if self._full_stats_init: return tf.cond(tf.greater(self.sgd_step, self._cold_iter), lambda: tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter)), tf.no_op) else: return tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter)) def updateRunningAvgStats(statsUpdates, fac_iter=1): # return tf.cond(tf.greater_equal(self.factor_step, # tf.convert_to_tensor(fac_iter)), lambda: # tf.group(*self._apply_stats(stats_list, varlist)), tf.no_op) return tf.group(*self._apply_stats(statsUpdates)) if self._async_stats: # asynchronous stats update update_stats = self._apply_stats(statsUpdates) queue = tf.FIFOQueue(1, [item.dtype for item in update_stats], shapes=[ item.get_shape() for item in update_stats]) enqueue_op = queue.enqueue(update_stats) def dequeue_stats_op(): return queue.dequeue() self.qr_stats = tf.train.QueueRunner(queue, [enqueue_op]) update_stats_op = tf.cond(tf.equal(queue.size(), tf.convert_to_tensor( 0)), tf.no_op, lambda: tf.group(*[dequeue_stats_op(), ])) else: # synchronous stats update update_stats_op = tf.cond(tf.greater_equal( self.stats_step, self._stats_accum_iter), lambda: updateRunningAvgStats(statsUpdates), updateAccumStats) self._update_stats_op = update_stats_op return update_stats_op def _apply_stats(self, statsUpdates, accumulate=False, accumulateCoeff=0.): updateOps = [] # obtain the stats var list for stats_var in statsUpdates: stats_new = statsUpdates[stats_var] if accumulate: # simple superbatch averaging update_op = tf.assign_add( stats_var, accumulateCoeff * stats_new, use_locking=True) else: # exponential running averaging update_op = tf.assign( stats_var, stats_var * self._stats_decay, use_locking=True) update_op = tf.assign_add( update_op, (1. 
- self._stats_decay) * stats_new, use_locking=True) updateOps.append(update_op) with tf.control_dependencies(updateOps): stats_step_op = tf.assign_add(self.stats_step, 1) if KFAC_DEBUG: stats_step_op = (tf.Print(stats_step_op, [tf.convert_to_tensor('step:'), self.global_step, tf.convert_to_tensor('fac step:'), self.factor_step, tf.convert_to_tensor('sgd step:'), self.sgd_step, tf.convert_to_tensor('Accum:'), tf.convert_to_tensor(accumulate), tf.convert_to_tensor('Accum coeff:'), tf.convert_to_tensor(accumulateCoeff), tf.convert_to_tensor('stat step:'), self.stats_step, updateOps[0], updateOps[1]])) return [stats_step_op, ] def getStatsEigen(self, stats=None): if len(self.stats_eigen) == 0: stats_eigen = {} if stats is None: stats = self.stats tmpEigenCache = {} with tf.device('/cpu:0'): for var in stats: for key in ['fprop_concat_stats', 'bprop_concat_stats']: for stats_var in stats[var][key]: if stats_var not in tmpEigenCache: stats_dim = stats_var.get_shape()[1].value e = tf.Variable(tf.ones( [stats_dim]), name='KFAC_FAC/' + stats_var.name.split(':')[0] + '/e', trainable=False) Q = tf.Variable(tf.diag(tf.ones( [stats_dim])), name='KFAC_FAC/' + stats_var.name.split(':')[0] + '/Q', trainable=False) stats_eigen[stats_var] = {'e': e, 'Q': Q} tmpEigenCache[ stats_var] = stats_eigen[stats_var] else: stats_eigen[stats_var] = tmpEigenCache[ stats_var] self.stats_eigen = stats_eigen return self.stats_eigen def computeStatsEigen(self): """ compute the eigen decomp using copied var stats to avoid concurrent read/write from other queue """ # TO-DO: figure out why this op has delays (possibly moving # eigenvectors around?) with tf.device('/cpu:0'): def removeNone(tensor_list): local_list = [] for item in tensor_list: if item is not None: local_list.append(item) return local_list def copyStats(var_list): print("copying stats to buffer tensors before eigen decomp") redundant_stats = {} copied_list = [] for item in var_list: if item is not None: if item not in redundant_stats: if self._use_float64: redundant_stats[item] = tf.cast( tf.identity(item), tf.float64) else: redundant_stats[item] = tf.identity(item) copied_list.append(redundant_stats[item]) else: copied_list.append(None) return copied_list #stats = [copyStats(self.fStats), copyStats(self.bStats)] #stats = [self.fStats, self.bStats] stats_eigen = self.stats_eigen computedEigen = {} eigen_reverse_lookup = {} updateOps = [] # sync copied stats # with tf.control_dependencies(removeNone(stats[0]) + # removeNone(stats[1])): with tf.control_dependencies([]): for stats_var in stats_eigen: if stats_var not in computedEigen: eigens = tf.self_adjoint_eig(stats_var) e = eigens[0] Q = eigens[1] if self._use_float64: e = tf.cast(e, tf.float32) Q = tf.cast(Q, tf.float32) updateOps.append(e) updateOps.append(Q) computedEigen[stats_var] = {'e': e, 'Q': Q} eigen_reverse_lookup[e] = stats_eigen[stats_var]['e'] eigen_reverse_lookup[Q] = stats_eigen[stats_var]['Q'] self.eigen_reverse_lookup = eigen_reverse_lookup self.eigen_update_list = updateOps if KFAC_DEBUG: self.eigen_update_list = [item for item in updateOps] with tf.control_dependencies(updateOps): updateOps.append(tf.Print(tf.constant( 0.), [tf.convert_to_tensor('computed factor eigen')])) return updateOps def applyStatsEigen(self, eigen_list): updateOps = [] print(('updating %d eigenvalue/vectors' % len(eigen_list))) for i, (tensor, mark) in enumerate(zip(eigen_list, self.eigen_update_list)): stats_eigen_var = self.eigen_reverse_lookup[mark] updateOps.append( tf.assign(stats_eigen_var, tensor, 
use_locking=True)) with tf.control_dependencies(updateOps): factor_step_op = tf.assign_add(self.factor_step, 1) updateOps.append(factor_step_op) if KFAC_DEBUG: updateOps.append(tf.Print(tf.constant( 0.), [tf.convert_to_tensor('updated kfac factors')])) return updateOps def getKfacPrecondUpdates(self, gradlist, varlist): updatelist = [] vg = 0. assert len(self.stats) > 0 assert len(self.stats_eigen) > 0 assert len(self.factors) > 0 counter = 0 grad_dict = {var: grad for grad, var in zip(gradlist, varlist)} for grad, var in zip(gradlist, varlist): GRAD_RESHAPE = False GRAD_TRANSPOSE = False fpropFactoredFishers = self.stats[var]['fprop_concat_stats'] bpropFactoredFishers = self.stats[var]['bprop_concat_stats'] if (len(fpropFactoredFishers) + len(bpropFactoredFishers)) > 0: counter += 1 GRAD_SHAPE = grad.get_shape() if len(grad.get_shape()) > 2: # reshape conv kernel parameters KW = int(grad.get_shape()[0]) KH = int(grad.get_shape()[1]) C = int(grad.get_shape()[2]) D = int(grad.get_shape()[3]) if len(fpropFactoredFishers) > 1 and self._channel_fac: # reshape conv kernel parameters into tensor grad = tf.reshape(grad, [KW * KH, C, D]) else: # reshape conv kernel parameters into 2D grad grad = tf.reshape(grad, [-1, D]) GRAD_RESHAPE = True elif len(grad.get_shape()) == 1: # reshape bias or 1D parameters D = int(grad.get_shape()[0]) grad = tf.expand_dims(grad, 0) GRAD_RESHAPE = True else: # 2D parameters C = int(grad.get_shape()[0]) D = int(grad.get_shape()[1]) if (self.stats[var]['assnBias'] is not None) and not self._blockdiag_bias: # use homogeneous coordinates only works for 2D grad. # TO-DO: figure out how to factorize bias grad # stack bias grad var_assnBias = self.stats[var]['assnBias'] grad = tf.concat( [grad, tf.expand_dims(grad_dict[var_assnBias], 0)], 0) # project gradient to eigen space and reshape the eigenvalues # for broadcasting eigVals = [] for idx, stats in enumerate(self.stats[var]['fprop_concat_stats']): Q = self.stats_eigen[stats]['Q'] e = detectMinVal(self.stats_eigen[stats][ 'e'], var, name='act', debug=KFAC_DEBUG) Q, e = factorReshape(Q, e, grad, facIndx=idx, ftype='act') eigVals.append(e) grad = gmatmul(Q, grad, transpose_a=True, reduce_dim=idx) for idx, stats in enumerate(self.stats[var]['bprop_concat_stats']): Q = self.stats_eigen[stats]['Q'] e = detectMinVal(self.stats_eigen[stats][ 'e'], var, name='grad', debug=KFAC_DEBUG) Q, e = factorReshape(Q, e, grad, facIndx=idx, ftype='grad') eigVals.append(e) grad = gmatmul(grad, Q, transpose_b=False, reduce_dim=idx) ## ##### # whiten using eigenvalues weightDecayCoeff = 0. if var in self._weight_decay_dict: weightDecayCoeff = self._weight_decay_dict[var] if KFAC_DEBUG: print(('weight decay coeff for %s is %f' % (var.name, weightDecayCoeff))) if self._factored_damping: if KFAC_DEBUG: print(('use factored damping for %s' % (var.name))) coeffs = 1. num_factors = len(eigVals) # compute the ratio of two trace norm of the left and right # KFac matrices, and their generalization if len(eigVals) == 1: damping = self._epsilon + weightDecayCoeff else: damping = tf.pow( self._epsilon + weightDecayCoeff, 1. / num_factors) eigVals_tnorm_avg = [tf.reduce_mean( tf.abs(e)) for e in eigVals] for e, e_tnorm in zip(eigVals, eigVals_tnorm_avg): eig_tnorm_negList = [ item for item in eigVals_tnorm_avg if item != e_tnorm] if len(eigVals) == 1: adjustment = 1. 
elif len(eigVals) == 2: adjustment = tf.sqrt( e_tnorm / eig_tnorm_negList[0]) else: eig_tnorm_negList_prod = reduce( lambda x, y: x * y, eig_tnorm_negList) adjustment = tf.pow( tf.pow(e_tnorm, num_factors - 1.) / eig_tnorm_negList_prod, 1. / num_factors) coeffs *= (e + adjustment * damping) else: coeffs = 1. damping = (self._epsilon + weightDecayCoeff) for e in eigVals: coeffs *= e coeffs += damping #grad = tf.Print(grad, [tf.convert_to_tensor('1'), tf.convert_to_tensor(var.name), grad.get_shape()]) grad /= coeffs #grad = tf.Print(grad, [tf.convert_to_tensor('2'), tf.convert_to_tensor(var.name), grad.get_shape()]) ##### # project gradient back to euclidean space for idx, stats in enumerate(self.stats[var]['fprop_concat_stats']): Q = self.stats_eigen[stats]['Q'] grad = gmatmul(Q, grad, transpose_a=False, reduce_dim=idx) for idx, stats in enumerate(self.stats[var]['bprop_concat_stats']): Q = self.stats_eigen[stats]['Q'] grad = gmatmul(grad, Q, transpose_b=True, reduce_dim=idx) ## #grad = tf.Print(grad, [tf.convert_to_tensor('3'), tf.convert_to_tensor(var.name), grad.get_shape()]) if (self.stats[var]['assnBias'] is not None) and not self._blockdiag_bias: # use homogeneous coordinates only works for 2D grad. # TO-DO: figure out how to factorize bias grad # un-stack bias grad var_assnBias = self.stats[var]['assnBias'] C_plus_one = int(grad.get_shape()[0]) grad_assnBias = tf.reshape(tf.slice(grad, begin=[ C_plus_one - 1, 0], size=[1, -1]), var_assnBias.get_shape()) grad_assnWeights = tf.slice(grad, begin=[0, 0], size=[C_plus_one - 1, -1]) grad_dict[var_assnBias] = grad_assnBias grad = grad_assnWeights #grad = tf.Print(grad, [tf.convert_to_tensor('4'), tf.convert_to_tensor(var.name), grad.get_shape()]) if GRAD_RESHAPE: grad = tf.reshape(grad, GRAD_SHAPE) grad_dict[var] = grad print(('projecting %d gradient matrices' % counter)) for g, var in zip(gradlist, varlist): grad = grad_dict[var] ### clipping ### if KFAC_DEBUG: print(('apply clipping to %s' % (var.name))) tf.Print(grad, [tf.sqrt(tf.reduce_sum(tf.pow(grad, 2)))], "Euclidean norm of new grad") local_vg = tf.reduce_sum(grad * g * (self._lr * self._lr)) vg += local_vg # recale everything if KFAC_DEBUG: print('apply vFv clipping') scaling = tf.minimum(1., tf.sqrt(self._clip_kl / vg)) if KFAC_DEBUG: scaling = tf.Print(scaling, [tf.convert_to_tensor( 'clip: '), scaling, tf.convert_to_tensor(' vFv: '), vg]) with tf.control_dependencies([tf.assign(self.vFv, vg)]): updatelist = [grad_dict[var] for var in varlist] for i, item in enumerate(updatelist): updatelist[i] = scaling * item return updatelist def compute_gradients(self, loss, var_list=None): varlist = var_list if varlist is None: varlist = tf.trainable_variables() g = tf.gradients(loss, varlist) return [(a, b) for a, b in zip(g, varlist)] def apply_gradients_kfac(self, grads): g, varlist = list(zip(*grads)) if len(self.stats_eigen) == 0: self.getStatsEigen() qr = None # launch eigen-decomp on a queue thread if self._async: print('Use async eigen decomp') # get a list of factor loading tensors factorOps_dummy = self.computeStatsEigen() # define a queue for the list of factor loading tensors queue = tf.FIFOQueue(1, [item.dtype for item in factorOps_dummy], shapes=[ item.get_shape() for item in factorOps_dummy]) enqueue_op = tf.cond(tf.logical_and(tf.equal(tf.mod(self.stats_step, self._kfac_update), tf.convert_to_tensor( 0)), tf.greater_equal(self.stats_step, self._stats_accum_iter)), lambda: queue.enqueue(self.computeStatsEigen()), tf.no_op) def dequeue_op(): return queue.dequeue() qr = 
tf.train.QueueRunner(queue, [enqueue_op]) updateOps = [] global_step_op = tf.assign_add(self.global_step, 1) updateOps.append(global_step_op) with tf.control_dependencies([global_step_op]): # compute updates assert self._update_stats_op != None updateOps.append(self._update_stats_op) dependency_list = [] if not self._async: dependency_list.append(self._update_stats_op) with tf.control_dependencies(dependency_list): def no_op_wrapper(): return tf.group(*[tf.assign_add(self.cold_step, 1)]) if not self._async: # synchronous eigen-decomp updates updateFactorOps = tf.cond(tf.logical_and(tf.equal(tf.mod(self.stats_step, self._kfac_update), tf.convert_to_tensor(0)), tf.greater_equal(self.stats_step, self._stats_accum_iter)), lambda: tf.group(*self.applyStatsEigen(self.computeStatsEigen())), no_op_wrapper) else: # asynchronous eigen-decomp updates using queue updateFactorOps = tf.cond(tf.greater_equal(self.stats_step, self._stats_accum_iter), lambda: tf.cond(tf.equal(queue.size(), tf.convert_to_tensor(0)), tf.no_op, lambda: tf.group( *self.applyStatsEigen(dequeue_op())), ), no_op_wrapper) updateOps.append(updateFactorOps) with tf.control_dependencies([updateFactorOps]): def gradOp(): return list(g) def getKfacGradOp(): return self.getKfacPrecondUpdates(g, varlist) u = tf.cond(tf.greater(self.factor_step, tf.convert_to_tensor(0)), getKfacGradOp, gradOp) optim = tf.train.MomentumOptimizer( self._lr * (1. - self._momentum), self._momentum) #optim = tf.train.AdamOptimizer(self._lr, epsilon=0.01) def optimOp(): def updateOptimOp(): if self._full_stats_init: return tf.cond(tf.greater(self.factor_step, tf.convert_to_tensor(0)), lambda: optim.apply_gradients(list(zip(u, varlist))), tf.no_op) else: return optim.apply_gradients(list(zip(u, varlist))) if self._full_stats_init: return tf.cond(tf.greater_equal(self.stats_step, self._stats_accum_iter), updateOptimOp, tf.no_op) else: return tf.cond(tf.greater_equal(self.sgd_step, self._cold_iter), updateOptimOp, tf.no_op) updateOps.append(optimOp()) return tf.group(*updateOps), qr def apply_gradients(self, grads): coldOptim = tf.train.MomentumOptimizer( self._cold_lr, self._momentum) def coldSGDstart(): sgd_grads, sgd_var = zip(*grads) if self.max_grad_norm != None: sgd_grads, sgd_grad_norm = tf.clip_by_global_norm(sgd_grads,self.max_grad_norm) sgd_grads = list(zip(sgd_grads,sgd_var)) sgd_step_op = tf.assign_add(self.sgd_step, 1) coldOptim_op = coldOptim.apply_gradients(sgd_grads) if KFAC_DEBUG: with tf.control_dependencies([sgd_step_op, coldOptim_op]): sgd_step_op = tf.Print( sgd_step_op, [self.sgd_step, tf.convert_to_tensor('doing cold sgd step')]) return tf.group(*[sgd_step_op, coldOptim_op]) kfacOptim_op, qr = self.apply_gradients_kfac(grads) def warmKFACstart(): return kfacOptim_op return tf.cond(tf.greater(self.sgd_step, self._cold_iter), warmKFACstart, coldSGDstart), qr def minimize(self, loss, loss_sampled, var_list=None): grads = self.compute_gradients(loss, var_list=var_list) update_stats_op = self.compute_and_apply_stats( loss_sampled, var_list=var_list) return self.apply_gradients(grads)
45679
48.171152
366
py
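A plain-numpy illustration of the eigenvalue-space scaling that getKfacPrecondUpdates applies when factored damping is enabled, for the two-factor (activation side / gradient side) case; the eigenvalues and the projected gradient below are made up.

import numpy as np

epsilon = 1e-2                       # damping, assuming no weight decay
e_act = np.array([4.0, 1.0, 0.25])   # eigenvalues of the activation-side factor
e_grad = np.array([2.0, 0.5])        # eigenvalues of the gradient-side factor

# With two factors the damping is split as epsilon ** (1 / num_factors), and each
# factor gets an adjustment based on the ratio of the factors' mean |eigenvalue|.
damping = epsilon ** 0.5
t_act, t_grad = np.abs(e_act).mean(), np.abs(e_grad).mean()
adj_act, adj_grad = np.sqrt(t_act / t_grad), np.sqrt(t_grad / t_act)

# coeffs broadcasts over the gradient already projected into the Kronecker eigenbasis.
coeffs = np.outer(e_act + adj_act * damping, e_grad + adj_grad * damping)
grad_eig = np.ones((3, 2))           # stand-in for Q_act^T grad Q_grad
print(grad_eig / coeffs)             # the whitened gradient, before projecting back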
baselines
baselines-master/baselines/acktr/utils.py
import tensorflow as tf def dense(x, size, name, weight_init=None, bias_init=0, weight_loss_dict=None, reuse=None): with tf.variable_scope(name, reuse=reuse): assert (len(tf.get_variable_scope().name.split('/')) == 2) w = tf.get_variable("w", [x.get_shape()[1], size], initializer=weight_init) b = tf.get_variable("b", [size], initializer=tf.constant_initializer(bias_init)) weight_decay_fc = 3e-4 if weight_loss_dict is not None: weight_decay = tf.multiply(tf.nn.l2_loss(w), weight_decay_fc, name='weight_decay_loss') if weight_loss_dict is not None: weight_loss_dict[w] = weight_decay_fc weight_loss_dict[b] = 0.0 tf.add_to_collection(tf.get_variable_scope().name.split('/')[0] + '_' + 'losses', weight_decay) return tf.nn.bias_add(tf.matmul(x, w), b) def kl_div(action_dist1, action_dist2, action_size): mean1, std1 = action_dist1[:, :action_size], action_dist1[:, action_size:] mean2, std2 = action_dist2[:, :action_size], action_dist2[:, action_size:] numerator = tf.square(mean1 - mean2) + tf.square(std1) - tf.square(std2) denominator = 2 * tf.square(std2) + 1e-8 return tf.reduce_sum( numerator/denominator + tf.log(std2) - tf.log(std1),reduction_indices=-1)
1322
44.62069
107
py
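A numpy reference for kl_div above: for diagonal Gaussians parameterised by means and standard deviations, the closed form below is algebraically identical to the TF expression up to its 1e-8 stabiliser; the test values are arbitrary.

import numpy as np

def kl_diag_gaussians(mean1, std1, mean2, std2):
    # KL( N(mean1, std1^2) || N(mean2, std2^2) ), summed over the action dimensions.
    return np.sum(
        np.log(std2 / std1)
        + (np.square(std1) + np.square(mean1 - mean2)) / (2.0 * np.square(std2))
        - 0.5,
        axis=-1,
    )

m1, s1 = np.zeros(3), np.ones(3)
m2, s2 = np.ones(3), 2.0 * np.ones(3)
print(kl_diag_gaussians(m1, s1, m2, s2))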
baselines
baselines-master/baselines/acktr/defaults.py
def mujoco(): return dict( nsteps=2500, value_network='copy' )
87
13.666667
28
py