repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64 0-18.3M) | avg_line_length (float64 0-1.36M) | max_line_length (int64 0-4.26M) | extension_type (stringclasses 1)
---|---|---|---|---|---|---
pairwiseMKL | pairwiseMKL-master/pairwisemkl/learner/compute_M__arrayjob.py |
#
# The MIT License (MIT)
#
# This file is part of pairwiseMKL
#
# Copyright (c) 2018 Anna Cichonska
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
import copy
from pairwisemkl.learner.kron_decomp import kron_decomp_centralization_operator
def compute_M_row(Ka_list, Kb_list, id_in):
"""
Task: to compute a single row of the matrix M (indexed by an integer id_in)
needed for optimizing pairwise kernel weights
(equation 12 of the paper describing pairwiseMKL method)
Input: Ka_list List of drug (view A in general) kernel matrices
Kb_list List of cell line (view B in general) kernel matrices
id_in Integer specifying the row of the matrix M
Output: m id_in'th row of the matrix M
References:
[1] Anna Cichonska, Tapio Pahikkala, Sandor Szedmak, Heli Julkunen, Antti Airola,
Markus Heinonen, Tero Aittokallio, Juho Rousu.
Learning with multiple pairwise kernels for drug bioactivity prediction.
Bioinformatics, 34, pages i509–i518. 2018.
"""
# Compute the factors of the pairwise kernel centering operator
Q = kron_decomp_centralization_operator(Ka_list[0].shape[0], Kb_list[0].shape[0])
# Total number of pairwise kernels
p = len(Ka_list)*len(Kb_list)
M = np.empty([p,p]); M[:] = np.NAN
ids_kernels = np.arange(p)
Ka_ids, Kb_ids = np.unravel_index(ids_kernels, (len(Ka_list),len(Kb_list)), order = 'C')
i_pairwise_k = id_in
i = Ka_ids[i_pairwise_k]
j = Kb_ids[i_pairwise_k]
h_col_start = i_pairwise_k+1
h_col_temp = copy.deepcopy(h_col_start)
h = 0
for ii in Ka_ids[h_col_start:p]:
jj = Kb_ids[h_col_start:p][h]
h = h + 1
# Compute < K_k, K_l>_F
M[i_pairwise_k, h_col_temp] = calculate_element(Q, Ka_list[i], Ka_list[ii], Kb_list[j], Kb_list[jj])
h_col_temp = h_col_temp + 1
# diagonal(M) = ( ||K_k||_F )^2
M[i_pairwise_k, i_pairwise_k] = calculate_element(Q, Ka_list[i], Ka_list[i], Kb_list[j], Kb_list[j])
m = M[id_in,]
return m
def calculate_element(Q, Ka_1, Ka_2, Kb_1, Kb_2):
"""
Task: to compute a single element of the matrix M
Input: Q List of lists, 2\times 2, of the factor matrices of
the kernel centering operator
Ka_1 First drug kernel matrix
Ka_2 Second drug kernel matrix
Kb_1 First cell line kernel matrix
Kb_2 Second cell line kernel matrix
Output: m Frobenius inner product between the centered pairwise
kernels (Ka_1 \otimes Kb_1) and (Ka_2 \otimes Kb_2)
"""
nsvalue = 2
m = 0
for q in range(nsvalue):
for r in range(nsvalue):
m += np.trace( np.dot(np.dot(np.dot(Q[q][0],Ka_1),Q[r][0]),Ka_2) ) \
* np.trace( np.dot(np.dot(np.dot(Q[q][1],Kb_1),Q[r][1]),Kb_2) )
return m
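# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hypothetical example of how compute_M_row() could be used to
# assemble the full matrix M row by row (in the array-job setting, each row
# index would be a separate job). The random "kernels" below are placeholders,
# not real drug / cell line kernels.
def _compute_M_demo():
    rng = np.random.RandomState(0)
    Xa = rng.rand(5, 3)
    Xb = rng.rand(4, 3)
    Ka_list = [np.dot(Xa, Xa.T), 0.5 * np.dot(Xa, Xa.T)]   # two toy drug kernels
    Kb_list = [np.dot(Xb, Xb.T)]                           # one toy cell line kernel
    p = len(Ka_list) * len(Kb_list)
    M = np.vstack([compute_M_row(Ka_list, Kb_list, i) for i in range(p)])
    # Each row only fills the entries from the diagonal onwards;
    # mirror the upper triangle to obtain the symmetric matrix M.
    M = np.triu(M) + np.triu(M, 1).T
    return M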
| 4,090 | 37.233645 | 108 | py |
pairwiseMKL | pairwiseMKL-master/pairwisemkl/learner/kron_decomp.py |
#
# The MIT License (MIT)
#
# This file is part of pairwiseMKL
#
# Copyright (c) 2018 Anna Cichonska, Sandor Szedmak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
def kron_decomp_centralization_operator(m,n):
"""
Task: to compute the factors of the pairwise kernel centralization operator
with dimension mn=m*n:
C_{mn} = I_{mn} - 1_{mn} \otimes 1'_{mn} / mn
I_{mn}=np.eye(mn)
1_{mn}=np.ones(mn)
C_mn reproduced as
C_mn= Q[0][0] \otimes Q[0][1] + Q[1][0] \otimes Q[1][1]
The factors have the structure:
Q[0][0]=(w_{000}-w_{001}) I_{m} + w_{001} 1_m \otimes 1'_m
Q[0][1]=(w_{010}-w_{011}) I_{n} + w_{011} 1_n \otimes 1'_n
Q[1][0]=(w_{100}-w_{101}) I_{m} + w_{101} 1_m \otimes 1'_m
Q[1][1]=(w_{110}-w_{111}) I_{n} + w_{111} 1_n \otimes 1'_n
Input: m The size m\times m of the first factor
n The size n\times n of the second factor
Output: Q List of lists, 2\times 2, of the factor matrices:
C_mn = Q[0][0] \otimes Q[0][1] + Q[1][0] \otimes Q[1][1]
References:
[1] Anna Cichonska, Tapio Pahikkala, Sandor Szedmak, Heli Julkunen, Antti Airola,
Markus Heinonen, Tero Aittokallio, Juho Rousu.
Learning with multiple pairwise kernels for drug bioactivity prediction.
Bioinformatics, 34, pages i509–i518. 2018.
"""
# Two singular values, two factors, two weights
nsvalue = 2
nfactor = 2
nweight = 2
xw = np.zeros((nsvalue,nfactor,nweight)) # the component weights
mn = m*n # the full size of the Kronecker product matrix
# The compressed reordered centralization matrix
Q = np.array([[mn-1,-(n-1)],[-(m-1),-(m-1)*(n-1)]])
# The singular vectors are rescaled for the compressed matrix
qu = np.array([1/m**0.5,1/(m*(m-1))**0.5])
qv = np.array([1/n**0.5,1/(n*(n-1))**0.5])
Creduced = Q*np.outer(qu,qv)
# Singular value decomposition of the compressed matrix
(Ur,Sr,Vr) = np.linalg.svd(Creduced)
# Vr is provided as transpose by numpy linalg
Vr = Vr.T
# Recover the components of the singular vectors
# of the original uncompressed matrix
U = Ur*np.outer(qu,np.ones(nsvalue))
V = Vr*np.outer(qv,np.ones(nsvalue))
# Recover the singular values for the uncompressed matrix
singval = np.diag(np.dot(U.T,np.dot(Q,V)))
# print(singval)
# Compute the weights:
# components of the singular vectors * sqrt(singular values)
Uw = U*np.outer(np.ones(nsvalue),np.sqrt(singval))
Vw = V*np.outer(np.ones(nsvalue),np.sqrt(singval))
# The weight matrix
xw[0] = np.vstack((Uw[:,0],Vw[:,0]))
xw[1] = np.vstack((Uw[:,1],Vw[:,1]))
# Build the factors from the weights
Qfactors = [[None,None] for _ in range(nsvalue)]
factorsize = [m,n]
for i in range(nsvalue):
for j in range(nfactor):
Qfactors[i][j] = (xw[i,j,0]-xw[i,j,1])*np.eye(factorsize[j]) \
+xw[i,j,1]*np.ones((factorsize[j],factorsize[j]))
return Qfactors
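# --- Illustrative check (not part of the original file) ---
# A small sanity-check sketch: for modest m and n, the two Kronecker factors
# returned above should reproduce the explicit centering operator
# C_mn = I_mn - J_mn/(m*n), where J_mn is the all-ones matrix.
def _kron_decomp_check(m=3, n=4):
    Q = kron_decomp_centralization_operator(m, n)
    C_rebuilt = np.kron(Q[0][0], Q[0][1]) + np.kron(Q[1][0], Q[1][1])
    C_exact = np.eye(m * n) - np.ones((m * n, m * n)) / (m * n)
    return np.allclose(C_rebuilt, C_exact)   # expected True if the decomposition is exact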
| 4,003 | 37.873786 | 84 | py |
pairwiseMKL | pairwiseMKL-master/pairwisemkl/learner/optimize_kernel_weights.py |
#
# The MIT License (MIT)
#
# This file is part of pairwiseMKL
#
# Copyright (c) 2018 Anna Cichonska
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
from cvxopt import matrix
from cvxopt import solvers
def optimize_kernel_weights(a, M):
"""
Task: to determine pairwise kernel weights
Input: a Vector storing Frobenius inner products between each
centered input pairwise kernel and the response kernel
M Matrix storing Frobenius inner products between all pairs
of centered input pairwise kernels
Output: w Vector with pairwise kernel weights
References:
[1] Anna Cichonska, Tapio Pahikkala, Sandor Szedmak, Heli Julkunen, Antti Airola,
Markus Heinonen, Tero Aittokallio, Juho Rousu.
Learning with multiple pairwise kernels for drug bioactivity prediction.
Bioinformatics, 34, pages i509–i518. 2018.
"""
n_k = len(M)
a = np.array(a,dtype='d').T
P = matrix(2*M)
q = matrix(-2*a)
G = matrix(np.diag([-1.0]*n_k))
h = matrix(np.zeros(n_k,dtype='d'))
sol = solvers.qp(P,q,G,h)
w = sol['x']
w = w/sum(w)
return np.asarray(w.T)
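# --- Illustrative usage sketch (not part of the original file) ---
# Solves the kernel-weight QP on a tiny synthetic problem. In the real
# pipeline, `a` holds the Frobenius products <K_k, K_y>_F and `M` the products
# <K_k, K_l>_F between centered pairwise kernels; here both are hand-made
# stand-ins just to show the call.
def _optimize_kernel_weights_demo():
    solvers.options['show_progress'] = False        # silence cvxopt output
    M_toy = np.array([[2.0, 0.5, 0.1],
                      [0.5, 1.5, 0.2],
                      [0.1, 0.2, 1.0]])             # symmetric positive definite
    a_toy = np.array([1.0, 0.3, 0.1])
    w = optimize_kernel_weights(a_toy, M_toy)
    return w                                        # non-negative weights summing to 1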
| 2,248 | 35.274194 | 86 | py |
GAMMA | GAMMA-master/bin/Tools/plotting_scripts.py |
# -*- coding: utf-8 -*-
# @Author: eliotayache
# @Date: 2020-05-14 16:24:48
# @Last Modified by: Eliot Ayache
# @Last Modified time: 2022-03-22 16:22:32
'''
This file contains functions used to print GAMMA outputs. These functions
should be run from the ./bin/Tools directory.
This can be run from a jupyter or iPython notebook:
%run plotting_scripts.py
'''
# Imports
# --------------------------------------------------------------------------------------------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# MPL options
# --------------------------------------------------------------------------------------------------
plt.rc('font', family='serif', size=12)
plt.rc('xtick', labelsize=12)
plt.rc('ytick', labelsize=12)
plt.rc('legend', fontsize=12)
plt.rcParams['savefig.dpi'] = 200
# IO functions
# --------------------------------------------------------------------------------------------------
def readData(key, it=None, sequence=False):
if sequence:
filename = '../../results/%s/phys%010d.out' % (key, it)
elif it is None:
filename = '../../results/%s' % (key)
else:
filename = '../../results/%s%d.out' % (key, it)
data = pd.read_csv(filename, sep=" ")
return(data)
def pivot(data, key):
return(data.pivot(index="j", columns="i", values=key).to_numpy())
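# --- Illustrative sketch (not part of the original file) ---
# pivot() expects the long-format GAMMA output with track index "j", cell
# index "i" and one column per variable; the tiny DataFrame below is a
# stand-in for a real phys*.out file read by readData().
def _pivot_demo():
    demo = pd.DataFrame({"j": [0, 0, 1, 1],
                         "i": [0, 1, 0, 1],
                         "rho": [1.0, 2.0, 3.0, 4.0]})
    return pivot(demo, "rho")   # -> array([[1., 2.], [3., 4.]])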
# Plotting functions
# --------------------------------------------------------------------------------------------------
def plotMulti(data, keys, jtrack=None, log=[], labels={}, **kwargs):
'''
Plots multiple variables for a single 1D track in the same figure.
Args:
-----
data: pandas dataframe. Output data.
keys: list of string. Variables to plot.
kwargs:
-------
log: list of strings. keys of variables to be plotted in logspace.
Returns:
--------
f: pyplot figure.
axes: list of axes contained in the figure.
Example usage:
--------------
f, axes = plotMulti(data, ["rho","p","lfac"],
tracer=False,
line=False,
labels={"rho":"$\\rho/\\rho_0$", "p":"$p/p_0$","lfac":"$\\gamma$"},
x_norm=RShock)
'''
Nk = len(keys)
f, axes = plt.subplots(Nk, 1, sharex=True, figsize=(6,2*Nk))
for key, k, ax in zip(keys, range(Nk), axes):
logkey = False
label = None
if key in log:
logkey = True
if key in labels:
label = labels[key]
plot1D(data, key, ax, jtrack=jtrack, log=logkey, label=label, **kwargs)
plt.tight_layout()
return(f, axes)
def plot1D(data, key, ax=None, mov="x", log=False, v1min=None, tracer=True,
line=True, r2=False, x_norm=None, jtrack=None, label=None,
**kwargs):
'''
Plots 1D outputs from GAMMA.
Works on 1D AND 2D outputs. In the 2D case, specify the jtrack to plot.
Args:
-----
data: pandas dataframe. Output data.
key: string. Variable to plot.
Example usage:
--------------
data = readData('Last/phys0000000000.out')
plot1D(data, "rho", log=True, jtrack=0)
'''
if key == "lfac":
var = "vx"
else:
var = key
if jtrack is not(None):
z = pivot(data, var)[jtrack, :]
x = pivot(data, "x")[jtrack, :]
tracvals = pivot(data, "trac")[jtrack, :]
else:
z = data[var].to_numpy()
x = np.copy(data["x"].to_numpy())
tracvals = data["trac"].to_numpy()
if x_norm is not(None):
x /= x_norm
if key == "lfac":
z = 1./np.sqrt(1 - z**2)
if r2:
z *= x**2
if ax is None:
plt.figure()
ax = plt.gca()
if label is not(None):
ax.set_ylabel(label)
else:
ax.set_ylabel(key)
if log:
ax.set_yscale('log')
if line:
ax.plot(x, z, 'k',zorder=1)
ax.scatter(x, z, c='None', edgecolors='k', lw=2, zorder=2, label="numerical")
if tracer:
ax.scatter(x, z, c=tracvals, edgecolors='None', zorder=3, cmap='cividis')
def plot2D(data, key, z_override=None, mov="x", log=False, v1min=None,
geometry="cartesian", quiver=False, color=None, edges='None',
invert=False, r2=False, cmap='magma', tlayout=False, colorbar=True,
slick=False, phi=0., fig=None, label=None, axis=None, thetaobs=0.,
nuobs=1.e17, shrink=0.6, expand=False):
'''
Plots 2D outputs from GAMMA.
Args:
-----
data: pandas dataframe. Output data.
key: string. Variable to plot.
Returns:
--------
xmin: double. Minimum coordinate x in data.
xmax: double. Maximum coordinate x in data.
thetamax: double. Highest track angle in polar geometry.
im: pyplot.image. 2D map of the requested variable.
Example usage:
--------------
data = readData('Last/phys0000000000.out')
# On specific axes
f = plt.figure()
ax = plt.axes(projection='polar')
plot2D(data, "rho", fig=f, axis=ax, **kwargs)
# On axes of its own
plot2D(data, "rho", geometry='polar', **kwargs)
'''
if z_override is not None:
z = z_override
if key == "lfac":
vx = data.pivot(index='j', columns='i', values="vx").to_numpy()
vy = data.pivot(index='j', columns='i', values="vy").to_numpy()
z = 1./np.sqrt(1 - (vx**2+vy**2))
else:
z = data.pivot(index='j', columns='i', values=key).to_numpy()
x = data.pivot(index='j', columns='i', values='x').to_numpy()
dx = data.pivot(index='j', columns='i', values='dx').to_numpy()
y = data.pivot(index='j', columns='i', values='y').to_numpy()
dy = data.pivot(index='j', columns='i', values='dy').to_numpy()
# duplicating last row for plotting
z = np.append(z, np.expand_dims(z[-1, :], axis=0), axis=0)
x = np.append(x, np.expand_dims(x[-1, :], axis=0), axis=0)
dx = np.append(dx, np.expand_dims(dx[-1, :], axis=0), axis=0)
y = np.append(y, np.expand_dims(y[-1, :], axis=0), axis=0)
dy = np.append(dy, np.expand_dims(dy[-1, :], axis=0), axis=0)
# duplicating last column for plotting
z = np.append(z, np.expand_dims(z[:, -1], axis=1), axis=1)
x = np.append(x, np.expand_dims(x[:, -1], axis=1), axis=1)
dx = np.append(dx, np.expand_dims(dx[:, -1], axis=1), axis=1)
y = np.append(y, np.expand_dims(y[:, -1], axis=1), axis=1)
dy = np.append(dy, np.expand_dims(dy[:, -1], axis=1), axis=1)
nact = np.array([np.count_nonzero(~np.isnan(xj)) for xj in x])
if (quiver):
vx = data.pivot(index='j', columns='i', values='vx').to_numpy()
vx = np.append(vx, np.expand_dims(vx[-1, :], axis=0), axis=0)
vx = np.ma.masked_array(vx, np.isnan(vx))
if r2:
z *= x**2
xmin = np.nanmin(x)
xmax = np.nanmax(x)
ymin = np.nanmin(y)
ymax = np.nanmax(y)
vmax = np.nanmax(z[4:, :])
vmin = np.nanmin(z)
if log:
vmin = np.nanmin(z[z > 0])
if v1min:
vmin = v1min
if geometry == "polar":
projection = "polar"
else:
projection = None
if axis is None:
f = plt.figure()
ax = plt.axes(projection=projection)
else:
f = fig
ax = axis
if geometry == "polar" or axis is not None:
ax.set_thetamax(ymax*180./np.pi)
ax.set_thetamin(ymin*180./np.pi)
if invert:
ax.set_thetamin(-ymax*180./np.pi)
if slick:
ax.axis("off")
for j in range(z.shape[0]-1):
xj = x - dx/2.
yj = y - dy/2.
dyj = dy
xj[j, nact[j]-1] += dx[j, nact[j]-1]
if mov == 'y':
tmp = np.copy(xj)
xj = yj
yj = np.copy(tmp)
dyj = dx
xj[j+1, :] = xj[j, :]
yj[j+1, :] = yj[j, :]+dyj[j, :]
xj = xj[j:j+2, :]
yj = yj[j:j+2, :]
zj = z[j:j+2, :]
if invert:
yj *= -1
if log:
im = ax.pcolor(yj, xj, zj,
norm=LogNorm(vmin=vmin, vmax=vmax),
edgecolors=edges,
cmap=cmap,
facecolor=color)
else:
im = ax.pcolor(yj, xj, zj,
vmin=vmin, vmax=vmax,
edgecolors=edges,
cmap=cmap,
facecolor=color)
if geometry != "polar":
ax.set_aspect('equal')
if geometry == "polar" or axis is not None:
ax.set_rorigin(0)
ax.set_rmin(xmin)
ax.set_rticks([xmin, xmax])
if colorbar:
cb = f.colorbar(im, ax=ax, orientation='vertical', shrink=shrink, pad=0.1)
if label is None:
label = key
cb.set_label(label, fontsize=14)
if tlayout:
f.tight_layout()
thetamax = ymax*180./np.pi
return xmin, xmax, thetamax, im
| 8,273 | 25.266667 | 100 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/naf_pendulum.py |
import argparse
import collections
import pandas
import numpy as np
import os
import gym
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Input, Concatenate
from keras.optimizers import Adam
import tensorflow as tf
from rl.agents import NAFAgent
from rl.memory import SequentialMemory
from rl.random import OrnsteinUhlenbeckProcess
from rl.core import Processor
from noise_estimator import *
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir', default='logs',
help='Log dir [default: logs]')
parser.add_argument('--reward', default='normal',
help='reward choice: normal/noisy/surrogate [default: normal]')
parser.add_argument('--weight', type=float, default=0.6,
help='Weight of random confusion matrix [default: 0.6]')
parser.add_argument('--noise_type', type=str, default='norm_all',
help='Type of noise added: norm_all/norm_one/anti_iden/max_one [default: norm_all]')
FLAGS = parser.parse_args()
REWARD = FLAGS.reward
WEIGHT = FLAGS.weight
NOISE_TYPE = FLAGS.noise_type
assert (NOISE_TYPE in ["norm_all", "norm_one", "anti_iden", "max_one"])
if REWARD == "normal":
LOG_DIR = os.path.join(os.path.join(FLAGS.log_dir, "naf_pendulum"), "normal")
else:
LOG_DIR = os.path.join(os.path.join(os.path.join(FLAGS.log_dir, "naf_pendulum"), NOISE_TYPE), str(WEIGHT))
ENV_NAME = 'Pendulum-v0'
# gym.undo_logger_setup()
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
os.system('cp naf_pendulum.py %s' % (LOG_DIR)) # bkp of train procedure
def train():
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
np.random.seed(123)
env.seed(123)
assert len(env.action_space.shape) == 1
nb_actions = env.action_space.shape[0]
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
from keras import backend as K
K.set_session(sess)
# Build all necessary models: V, mu, and L networks.
V_model = Sequential()
V_model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
V_model.add(Dense(16))
V_model.add(Activation('relu'))
V_model.add(Dense(16))
V_model.add(Activation('relu'))
V_model.add(Dense(16))
V_model.add(Activation('relu'))
V_model.add(Dense(1))
V_model.add(Activation('linear'))
V_model.summary()
mu_model = Sequential()
mu_model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
mu_model.add(Dense(16))
mu_model.add(Activation('relu'))
mu_model.add(Dense(16))
mu_model.add(Activation('relu'))
mu_model.add(Dense(16))
mu_model.add(Activation('relu'))
mu_model.add(Dense(nb_actions))
mu_model.add(Activation('linear'))
mu_model.summary()
action_input = Input(shape=(nb_actions,), name='action_input')
observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
x = Concatenate()([action_input, Flatten()(observation_input)])
x = Dense(32)(x)
x = Activation('relu')(x)
x = Dense(32)(x)
x = Activation('relu')(x)
x = Dense(32)(x)
x = Activation('relu')(x)
x = Dense(((nb_actions * nb_actions + nb_actions) // 2))(x)
x = Activation('linear')(x)
L_model = Model(inputs=[action_input, observation_input], outputs=x)
L_model.summary()
# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=100000, window_length=1)
random_process = OrnsteinUhlenbeckProcess(theta=.15, mu=0., sigma=.3, size=nb_actions)
if REWARD == "normal":
processor = NAFPendulumProcessor()
naf_normal = NAFAgent(nb_actions=nb_actions, V_model=V_model, L_model=L_model, mu_model=mu_model,
memory=memory, nb_steps_warmup=100, random_process=random_process,
gamma=.99, target_model_update=1e-3, processor=processor)
naf_normal.compile(Adam(lr=.00025, clipnorm=1.), metrics=['mae'])
history_normal = naf_normal.fit(env, nb_steps=150000, visualize=False, verbose=2, nb_max_episode_steps=200)
naf_normal.save_weights(os.path.join(LOG_DIR, 'naf_normal_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
naf_normal.test(env, nb_episodes=10, visualize=False, nb_max_episode_steps=200)
pandas.DataFrame(history_normal.history).to_csv(os.path.join(LOG_DIR, "normal.csv"))
elif REWARD == "noisy":
# processor_noisy = PendulumSurrogateProcessor(weight=WEIGHT, surrogate=False, noise_type=NOISE_TYPE)
processor_noisy = PendulumProcessor(weight=WEIGHT, surrogate=False, noise_type=NOISE_TYPE)
naf_noisy = NAFAgent(nb_actions=nb_actions, V_model=V_model, L_model=L_model, mu_model=mu_model,
memory=memory, nb_steps_warmup=100, random_process=random_process,
gamma=.99, target_model_update=1e-3, processor=processor_noisy)
naf_noisy.compile(Adam(lr=.00025, clipnorm=1.), metrics=['mae'])
history_noisy = naf_noisy.fit(env, nb_steps=150000, visualize=False, verbose=2, nb_max_episode_steps=200)
naf_noisy.save_weights(os.path.join(LOG_DIR, 'naf_noisy_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
naf_noisy.test(env, nb_episodes=10, visualize=False, nb_max_episode_steps=200)
pandas.DataFrame(history_noisy.history).to_csv(os.path.join(LOG_DIR, "noisy.csv"))
elif REWARD == "surrogate":
# processor_surrogate = PendulumSurrogateProcessor(weight=WEIGHT, surrogate=True, noise_type=NOISE_TYPE)
processor_surrogate = PendulumProcessor(weight=WEIGHT, surrogate=True, noise_type=NOISE_TYPE)
naf_surrogate = NAFAgent(nb_actions=nb_actions, V_model=V_model, L_model=L_model, mu_model=mu_model,
memory=memory, nb_steps_warmup=100, random_process=random_process,
gamma=.99, target_model_update=1e-3, processor=processor_surrogate)
naf_surrogate.compile(Adam(lr=.00025, clipnorm=1.), metrics=['mae'])
history_surrogate = naf_surrogate.fit(env, nb_steps=150000, visualize=False, verbose=2, nb_max_episode_steps=200)
naf_surrogate.save_weights(os.path.join(LOG_DIR, 'naf_surrogate_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
naf_surrogate.test(env, nb_episodes=10, visualize=False, nb_max_episode_steps=200)
pandas.DataFrame(history_surrogate.history).to_csv(os.path.join(LOG_DIR, "surrogate.csv"))
else:
raise NotImplementedError
if __name__ == "__main__":
train()
| 6,696 | 43.059211 | 122 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/dqn_cartpole.py |
import argparse
import collections
import pandas
import numpy as np
import os
import gym
from keras.layers import Activation, Dense, Flatten
from keras.models import Sequential
from keras.optimizers import Adam
import tensorflow as tf
from rl.agents.dqn import DQNAgent
from rl.core import Processor
from rl.memory import SequentialMemory
from rl.policy import BoltzmannQPolicy
from noise_estimator import *
from utils import *
parser = argparse.ArgumentParser()
parser.add_argument('--error_positive', type=float, default=0.2,
help='Error positive rate [default: 0.2]')
parser.add_argument('--error_negative', type=float, default=0.0,
help='Error negative rate [default: 0.0]')
parser.add_argument('--log_dir', default='logs',
help='Log dir [default: logs]')
parser.add_argument('--reward', default='normal',
help='Reward choice: normal/noisy/surrogate [default: normal]')
parser.add_argument('--smooth', type=str2bool, default=False,
help='Add smoothing to rewards [default: False]')
FLAGS = parser.parse_args()
ERR_P = FLAGS.error_positive
ERR_N = FLAGS.error_negative
REWARD = FLAGS.reward
SMOOTH = FLAGS.smooth
if REWARD == "normal":
LOG_DIR = os.path.join(FLAGS.log_dir, "dqn_cartpole")
else:
LOG_DIR = os.path.join(os.path.join(FLAGS.log_dir, "dqn_cartpole"), str(ERR_P))
ENV_NAME = 'CartPole-v0'
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
os.system('cp dqn_cartpole.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'setting.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
def train():
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
from keras import backend as K
K.set_session(sess)
# Next, we build a very simple model.
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
model.summary()
# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=50000, window_length=1)
policy = BoltzmannQPolicy()
# Okay, now it's time to learn something! We visualize the training here for show, but this
# slows down training quite a lot. You can always safely abort the training prematurely using
# Ctrl + C.
if REWARD == "normal":
dqn_normal = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
target_model_update=1e-2, policy=policy)
dqn_normal.compile(Adam(lr=1e-3), metrics=['mae'])
history_normal = dqn_normal.fit(env, nb_steps=10000, visualize=False, verbose=2)
dqn_normal.save_weights(os.path.join(LOG_DIR, 'dqn_normal_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
dqn_normal.test(env, nb_episodes=10, visualize=False, verbose=2)
pandas.DataFrame(history_normal.history).to_csv(os.path.join(LOG_DIR, "normal.csv"))
elif REWARD == "noisy":
if not SMOOTH:
processor_noisy = CartpoleProcessor(e_=ERR_N, e=ERR_P, surrogate=False)
else:
processor_noisy = CartpoleProcessor(e_=ERR_N, e=ERR_P, smooth=True, surrogate=False)
# processor_noisy = CartpoleSurrogateProcessor(e_=ERR_N, e=ERR_P, surrogate=False)
dqn_noisy = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
target_model_update=1e-2, policy=policy, processor=processor_noisy)
dqn_noisy.compile(Adam(lr=1e-3), metrics=['mae'])
history_noisy = dqn_noisy.fit(env, nb_steps=10000, visualize=False, verbose=2)
if not SMOOTH:
dqn_noisy.save_weights(os.path.join(LOG_DIR, 'dqn_noisy_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
pandas.DataFrame(history_noisy.history).to_csv(os.path.join(LOG_DIR, "noisy.csv"))
else:
dqn_noisy.save_weights(os.path.join(LOG_DIR, 'dqn_noisy_smooth_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
pandas.DataFrame(history_noisy.history).to_csv(os.path.join(LOG_DIR, "noisy_smooth.csv"))
dqn_noisy.test(env, nb_episodes=10, visualize=False, verbose=2)
elif REWARD == "surrogate":
if not SMOOTH:
processor_surrogate = CartpoleProcessor(e_=ERR_N, e=ERR_P, smooth=False, surrogate=True)
else:
processor_surrogate = CartpoleProcessor(e_=ERR_N, e=ERR_P, smooth=True, surrogate=True)
# processor_surrogate = CartpoleSurrogateProcessor(e_=ERR_N, e=ERR_P, surrogate=True)
dqn_surrogate = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
target_model_update=1e-2, policy=policy, processor=processor_surrogate)
dqn_surrogate.compile(Adam(lr=1e-3), metrics=['mae'])
history_surrogate = dqn_surrogate.fit(env, nb_steps=10000, visualize=False, verbose=2)
if not SMOOTH:
dqn_surrogate.save_weights(os.path.join(LOG_DIR, 'dqn_surrogate_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
pandas.DataFrame(history_surrogate.history).to_csv(os.path.join(LOG_DIR, "surrogate.csv"))
else:
dqn_surrogate.save_weights(os.path.join(LOG_DIR, 'dqn_surrogate_smooth_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
pandas.DataFrame(history_surrogate.history).to_csv(os.path.join(LOG_DIR, "surrogate_smooth.csv"))
dqn_surrogate.test(env, nb_episodes=10, visualize=False, verbose=2)
else:
raise NotImplementedError
if __name__ == "__main__":
train()
| 6,113 | 42.056338 | 133 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/duel_dqn_cartpole.py |
import argparse
import collections
import pandas
import numpy as np
import os
import gym
from keras.layers import Activation, Dense, Flatten
from keras.models import Sequential
from keras.optimizers import Adam
import tensorflow as tf
from rl.agents.dqn import DQNAgent
from rl.core import Processor
from rl.memory import SequentialMemory
from rl.policy import BoltzmannQPolicy
from noise_estimator import *
from utils import *
parser = argparse.ArgumentParser()
parser.add_argument('--error_positive', type=float, default=0.2,
help='Error positive rate [default: 0.2]')
parser.add_argument('--error_negative', type=float, default=0.0,
help='Error negative rate [default: 0.0]')
parser.add_argument('--log_dir', default='logs',
help='Log dir [default: logs]')
parser.add_argument('--reward', default='normal',
help='Reward choice: normal/noisy/surrogate [default: normal]')
parser.add_argument('--smooth', type=str2bool, default=False,
help='Add smoothing to rewards [default: False]')
FLAGS = parser.parse_args()
ERR_P = FLAGS.error_positive
ERR_N = FLAGS.error_negative
REWARD = FLAGS.reward
SMOOTH = FLAGS.smooth
if REWARD == "normal":
LOG_DIR = os.path.join(FLAGS.log_dir, "duel_dqn_cartpole")
else:
LOG_DIR = os.path.join(os.path.join(FLAGS.log_dir, "duel_dqn_cartpole"), str(ERR_P))
ENV_NAME = 'CartPole-v0'
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
os.system('cp duel_dqn_cartpole.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'setting.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
def train():
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
from keras import backend as K
K.set_session(sess)
# Next, we build a very simple model.
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
model.summary()
# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=50000, window_length=1)
policy = BoltzmannQPolicy()
# Okay, now it's time to learn something! We visualize the training here for show, but this
# slows down training quite a lot. You can always safely abort the training prematurely using
# Ctrl + C.
if REWARD == "normal":
dqn_normal = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
enable_dueling_network=True, dueling_type='avg',
target_model_update=1e-2, policy=policy)
dqn_normal.compile(Adam(lr=1e-3), metrics=['mae'])
history_normal = dqn_normal.fit(env, nb_steps=10000, visualize=False, verbose=2)
dqn_normal.save_weights(os.path.join(LOG_DIR, 'duel_dqn_normal_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
dqn_normal.test(env, nb_episodes=10, visualize=False, verbose=2)
pandas.DataFrame(history_normal.history).to_csv(os.path.join(LOG_DIR, "normal.csv"))
elif REWARD == "noisy":
if not SMOOTH:
processor_noisy = CartpoleProcessor(e_=ERR_N, e=ERR_P, smooth=False, surrogate=False)
else:
processor_noisy = CartpoleProcessor(e_=ERR_N, e=ERR_P, smooth=True, surrogate=False)
# processor_noisy = CartpoleSurrogateProcessor(e_=ERR_N, e=ERR_P, surrogate=False)
dqn_noisy = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
enable_dueling_network=True, dueling_type='avg',
target_model_update=1e-2, policy=policy, processor=processor_noisy)
dqn_noisy.compile(Adam(lr=1e-3), metrics=['mae'])
history_noisy = dqn_noisy.fit(env, nb_steps=10000, visualize=False, verbose=2)
if not SMOOTH:
dqn_noisy.save_weights(os.path.join(LOG_DIR, 'duel_dqn_noisy_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
pandas.DataFrame(history_noisy.history).to_csv(os.path.join(LOG_DIR, "noisy.csv"))
else:
dqn_noisy.save_weights(os.path.join(LOG_DIR, 'duel_dqn_noisy_smooth_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
pandas.DataFrame(history_noisy.history).to_csv(os.path.join(LOG_DIR, "noisy_smooth.csv"))
dqn_noisy.test(env, nb_episodes=10, visualize=False, verbose=2)
elif REWARD == "surrogate":
if not SMOOTH:
processor_surrogate = CartpoleProcessor(e_=ERR_N, e=ERR_P, smooth=False, surrogate=True)
else:
processor_surrogate = CartpoleProcessor(e_=ERR_N, e=ERR_P, smooth=True, surrogate=True)
# processor_surrogate = CartpoleSurrogateProcessor(e_=ERR_N, e=ERR_P, surrogate=True)
dqn_surrogate = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
enable_dueling_network=True, dueling_type='avg',
target_model_update=1e-2, policy=policy, processor=processor_surrogate)
dqn_surrogate.compile(Adam(lr=1e-3), metrics=['mae'])
history_surrogate = dqn_surrogate.fit(env, nb_steps=10000, visualize=False, verbose=2)
if not SMOOTH:
dqn_surrogate.save_weights(os.path.join(LOG_DIR, 'duel_dqn_surrogate_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
pandas.DataFrame(history_surrogate.history).to_csv(os.path.join(LOG_DIR, "surrogate.csv"))
else:
dqn_surrogate.save_weights(os.path.join(LOG_DIR, 'duel_dqn_surrogate_smooth_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
pandas.DataFrame(history_surrogate.history).to_csv(os.path.join(LOG_DIR, "surrogate_smooth.csv"))
dqn_surrogate.test(env, nb_episodes=10, visualize=False, verbose=2)
else:
raise NotImplementedError
if __name__ == "__main__":
train()
| 6,413 | 43.541667 | 138 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/collect.py |
import argparse
import glob
import os
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir', default='logs/ddpg_pendulum/norm_one',
help='Log dir [default: logs/ddpg_pendulum/norm_one]')
parser.add_argument('--save_dir', default='docs/ddpg_pendulum/norm_one',
help='Path of directory to saved [default: docs/ddpg_pendulum/norm_one]')
FLAGS = parser.parse_args()
LOG_DIR = FLAGS.log_dir
SAVE_DIR = FLAGS.save_dir
assert (os.path.exists(LOG_DIR))
if not os.path.exists(SAVE_DIR):
os.makedirs(SAVE_DIR)
def collect():
for j in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]:
input_dir = os.path.join(LOG_DIR, str(j))
files = glob.glob(os.path.join(input_dir, "*.png"))
for fin in files:
filename = fin[fin.rindex("/")+1:]
fout = os.path.join(SAVE_DIR, filename)
print("cp '%s' '%s'" % (fin, fout))
os.system("cp '%s' '%s'" % (fin, fout))
if __name__ == "__main__":
collect()
| 1,009 | 29.606061 | 93 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/utils.py |
import argparse

def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
| 241 | 33.571429 | 67 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/sarsa_cartpole.py |
import argparse
import collections
import pandas
import numpy as np
import os
import gym
from keras.layers import Activation, Dense, Flatten
from keras.models import Sequential
from keras.optimizers import Adam
import tensorflow as tf
from rl.agents import SARSAAgent
from rl.core import Processor
from rl.policy import BoltzmannQPolicy
from noise_estimator import *
from utils import *
parser = argparse.ArgumentParser()
parser.add_argument('--error_positive', type=float, default=0.2,
help='Error positive rate [default: 0.2]')
parser.add_argument('--error_negative', type=float, default=0.0,
help='Error negative rate [default: 0.0]')
parser.add_argument('--log_dir', default='logs',
help='Log dir [default: logs]')
parser.add_argument('--reward', default='normal',
help='reward choice: normal/noisy/surrogate [default: normal]')
parser.add_argument('--smooth', type=str2bool, default=False,
help='Add smoothing to rewards [default: False]')
FLAGS = parser.parse_args()
ERR_P = FLAGS.error_positive
ERR_N = FLAGS.error_negative
REWARD = FLAGS.reward
SMOOTH = FLAGS.smooth
if REWARD == "normal":
LOG_DIR = os.path.join(FLAGS.log_dir, "sarsa_cartpole")
else:
LOG_DIR = os.path.join(os.path.join(FLAGS.log_dir, "sarsa_cartpole"), str(ERR_P))
ENV_NAME = 'CartPole-v0'
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
os.system('cp sarsa_cartpole.py %s' % (LOG_DIR)) # bkp of train procedure
print ('cp sarsa_cartpole.py %s' % (LOG_DIR))
LOG_FOUT = open(os.path.join(LOG_DIR, 'setting.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def build_state(features):
return int("".join(map(lambda feature: str(int(feature)), features)))
def to_bin(value, bins):
return np.digitize(x=[value], bins=bins)[0]
def train():
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
from keras import backend as K
K.set_session(sess)
# Next, we build a very simple model.
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())
# SARSA does not require a memory.
policy = BoltzmannQPolicy()
# processor_noisy = CartpoleSurrogateProcessor(e_= ERR_N, e=ERR_P, surrogate=False)
# processor_surrogate = CartpoleSurrogateProcessor(e_= ERR_N, e=ERR_P, surrogate=True)
if not SMOOTH:
processor_noisy = CartpoleProcessor(e_= ERR_N, e=ERR_P, smooth=False, surrogate=False)
processor_surrogate = CartpoleProcessor(e_= ERR_N, e=ERR_P, smooth=False, surrogate=True)
else:
processor_noisy = CartpoleProcessor(e_= ERR_N, e=ERR_P, smooth=True, surrogate=False)
processor_surrogate = CartpoleProcessor(e_= ERR_N, e=ERR_P, smooth=True, surrogate=True)
if REWARD == "normal":
sarsa_normal = SARSAAgent(model=model, nb_actions=nb_actions, nb_steps_warmup=10,
policy=policy)
sarsa_normal.compile(Adam(lr=1e-3), metrics=['mae'])
history_normal = sarsa_normal.fit(env, nb_steps=50000, visualize=False, verbose=2)
sarsa_normal.save_weights(os.path.join(LOG_DIR, 'sarsa_normal_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
sarsa_normal.test(env, nb_episodes=10, visualize=False, verbose=2)
pandas.DataFrame(history_normal.history).to_csv(os.path.join(LOG_DIR, "normal.csv"))
elif REWARD == "noisy":
sarsa_noisy = SARSAAgent(model=model, nb_actions=nb_actions, nb_steps_warmup=10,
policy=policy, processor=processor_noisy)
sarsa_noisy.compile(Adam(lr=1e-3), metrics=['mae'])
history_noisy = sarsa_noisy.fit(env, nb_steps=50000, visualize=False, verbose=2)
if not SMOOTH:
sarsa_noisy.save_weights(os.path.join(LOG_DIR, 'sarsa_noisy_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
pandas.DataFrame(history_noisy.history).to_csv(os.path.join(LOG_DIR, "noisy.csv"))
else:
sarsa_noisy.save_weights(os.path.join(LOG_DIR, 'sarsa_noisy_smooth_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
pandas.DataFrame(history_noisy.history).to_csv(os.path.join(LOG_DIR, "noisy_smooth.csv"))
sarsa_noisy.test(env, nb_episodes=10, visualize=False)
elif REWARD == "surrogate":
sarsa_surrogate = SARSAAgent(model=model, nb_actions=nb_actions, nb_steps_warmup=10,
policy=policy, processor=processor_surrogate)
sarsa_surrogate.compile(Adam(lr=1e-3), metrics=['mae'])
history_surrogate = sarsa_surrogate.fit(env, nb_steps=50000, visualize=False, verbose=2)
if not SMOOTH:
sarsa_surrogate.save_weights(os.path.join(LOG_DIR, 'sarsa_surrogate_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
pandas.DataFrame(history_surrogate.history).to_csv(os.path.join(LOG_DIR, "surrogate.csv"))
else:
sarsa_surrogate.save_weights(os.path.join(LOG_DIR, 'sarsa_surrogate_smooth_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
pandas.DataFrame(history_surrogate.history).to_csv(os.path.join(LOG_DIR, "surrogate_smooth.csv"))
sarsa_surrogate.test(env, nb_episodes=10, visualize=False)
if __name__ == "__main__":
train()
| 5,869 | 39.482759 | 137 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/qlearn_cartpole.py |
import argparse
import collections
import os
import random
import numpy as np
import gym
import pandas
from utils import *
parser = argparse.ArgumentParser()
parser.add_argument('--error_positive', type=float, default=0.2,
help='Error positive rate [default: 0.2]')
parser.add_argument('--error_negative', type=float, default=0.,
help='Error negative rate [default: 0.]')
parser.add_argument('--log_dir', default='logs',
help='Log dir [default: logs]')
parser.add_argument('--reward', default='normal',
help='reward choice: normal/noisy/surrogate [default: normal]')
parser.add_argument('--smooth', type=str2bool, default=False,
help='Add smoothing to rewards [default: False]')
FLAGS = parser.parse_args()
ERR_P = FLAGS.error_positive
ERR_N = FLAGS.error_negative
REWARD = FLAGS.reward
SMOOTH = FLAGS.smooth
assert(REWARD in ["normal", "noisy", "surrogate"])
if REWARD == "normal":
LOG_DIR = os.path.join(FLAGS.log_dir, "qlearn_cartpole")
else:
LOG_DIR = os.path.join(os.path.join(FLAGS.log_dir, "qlearn_cartpole"), str(ERR_P))
ENV_NAME = 'CartPole-v0'
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
os.system('cp qlearn_cartpole.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'setting.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
class QLearn:
def __init__(self, actions, epsilon, alpha, gamma):
self.q = {}
self.epsilon = epsilon  # exploration constant
self.alpha = alpha      # learning rate
self.gamma = gamma      # discount factor
self.actions = actions
def getQ(self, state, action):
return self.q.get((state, action), 0.0)
def learnQ(self, state, action, reward, value):
'''
Q-learning update:
    Q(s, a) += alpha * (reward(s, a) + gamma * max_a' Q(s', a') - Q(s, a))
'''
oldv = self.q.get((state, action), None)
if oldv is None:
self.q[(state, action)] = reward
else:
self.q[(state, action)] = oldv + self.alpha * (value - oldv)
def chooseAction(self, state, return_q=False):
q = [self.getQ(state, a) for a in self.actions]
maxQ = max(q)
if random.random() < self.epsilon:
minQ = min(q); mag = max(abs(minQ), abs(maxQ))
# add random values to all the actions, recalculate maxQ
q = [q[i] + random.random() * mag - .5 * mag for i in range(len(self.actions))]
maxQ = max(q)
count = q.count(maxQ)
# In case there're several state-action max values
# we select a random one among them
if count > 1:
best = [i for i in range(len(self.actions)) if q[i] == maxQ]
i = random.choice(best)
else:
i = q.index(maxQ)
action = self.actions[i]
if return_q: # if they want it, give it!
return action, q
return action
def learn(self, state1, action1, reward, state2):
maxqnew = max([self.getQ(state2, a) for a in self.actions])
self.learnQ(state1, action1, reward, reward + self.gamma*maxqnew)
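# --- Illustrative sketch (not part of the original script) ---
# One sequence of Q-learning updates on a toy 2-state / 2-action problem, to
# make the rule above concrete:
#   Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
# (the first visit to a (state, action) pair simply stores the reward).
def _qlearn_update_demo():
    demo = QLearn(actions=[0, 1], epsilon=0.0, alpha=0.5, gamma=0.9)
    demo.learn(0, 1, 1.0, 1)   # first visit: Q[(0,1)] = 1.0
    demo.learn(0, 1, 1.0, 1)   # 1.0 + 0.5*(1.0 + 0.9*0.0 - 1.0) = 1.0
    demo.learn(1, 0, 5.0, 0)   # first visit: Q[(1,0)] = 5.0
    demo.learn(0, 1, 1.0, 1)   # 1.0 + 0.5*(1.0 + 0.9*5.0 - 1.0) = 3.25
    return demo.q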
def build_state(features):
return int("".join(map(lambda feature: str(int(feature)), features)))
def to_bin(value, bins):
return np.digitize(x=[value], bins=bins)[0]
class SurrogateRewardProcessor():
"""
Learning from surrogate reward
following paper "Learning from noisy labels"
"""
def __init__(self, e_=0.0, e=0.2, surrogate=False, epsilon=1e-6):
assert (e_ + e <= 1.0)
self.e_ = e_
self.e = e
self.surrogate = surrogate
self.epsilon = epsilon
def noisy_reward(self, reward):
n = np.random.random()
if np.abs(reward - 1.0) < self.epsilon:
if (n < self.e):
return -1 * reward
else:
if (n < self.e_):
return -1 * reward
return reward
def process_reward(self, reward):
r = self.noisy_reward(reward)
if not self.surrogate:
return r
if np.abs(r - 1.0) < self.epsilon:
r_surrogate = ((1 - self.e_) * r + self.e * r) / (1 - self.e_ - self.e)
else:
r_surrogate = ((1 - self.e) * r + self.e_ * r) / (1 - self.e_ - self.e)
return r_surrogate
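# --- Illustrative sketch (not part of the original script) ---
# With flip rates e (a true +1 observed as -1) and e_ (a true -1 observed as
# +1), the surrogate reward defined above is an unbiased estimator of the true
# reward: E[r_surrogate | r_true] = r_true. Closed-form check for r_true = +1:
def _surrogate_unbiased_check(e_=0.1, e=0.3):
    r_if_observed_plus = ((1 - e_) * 1.0 + e * 1.0) / (1 - e_ - e)          # observed +1
    r_if_observed_minus = ((1 - e) * (-1.0) + e_ * (-1.0)) / (1 - e_ - e)   # observed -1
    expected = (1 - e) * r_if_observed_plus + e * r_if_observed_minus       # P(observe +1 | true +1) = 1 - e
    return expected   # = 1.0 up to floating point, i.e. the true reward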
class PreProcessor:
"Add noise to reward"
def __init__(self, e_=0.1, e=0.3, normal=True, epsilon=1e-6):
assert (np.abs(e_ + e - 1) > epsilon)
self.normal = normal
self.e_ = e_
self.e = e
self.epsilon = epsilon
self.r1 = -1
self.r2 = 1
def noisy_reward(self, reward):
n = np.random.random()
if np.abs(reward - self.r1) < self.epsilon:
if (n < self.e_):
return self.r2
else:
if (n < self.e):
return self.r1
return reward
def process_reward(self, reward):
if self.normal:
return reward
r = self.noisy_reward(reward)
return r
class PostProcessor:
"""
Learning from surrogate reward
following paper "Learning from noisy labels"
"""
def __init__(self, smooth=False, surrogate=True,reverse=False, epsilon=1e-6):
self.surrogate = surrogate
self.smooth = smooth
self.r_sets = {}
self.r_smooth = {}
self.r1 = -1
self.r2 = 1
self.counter = 0
self.C = np.identity(2)
self.epsilon = epsilon
self.reverse = reverse
def process_reward(self, reward):
self.estimate_C()
self.e_ = self.C[0, 1]
self.e = self.C[1, 0]
if self.surrogate:
if np.abs(reward - self.r1) < self.epsilon:
reward = ((1 - self.e) * self.r1 - self.e_ * self.r2) / (1 - self.e_ - self.e)
else:
reward = ((1 - self.e_) * self.r2 - self.e * self.r1) / (1 - self.e_ - self.e)
return reward
def estimate_C(self):
if self.counter >= 100 and self.counter % 100 == 0:
e_ = 0; e = 0
# a = 0; b = 0
# prob = 0
self.count1 = 0
self.count2 = 0
for k in self.r_sets.keys():
freq_count = collections.Counter(self.r_sets[k])
if self.reverse:
truth, count = freq_count.most_common()[-1]
else: truth, count = freq_count.most_common()[0]
if truth == self.r1:
self.count1 += len(self.r_sets[k])
else:
self.count2 += len(self.r_sets[k])
for k in self.r_sets.keys():
freq_count = collections.Counter(self.r_sets[k])
# if self.e_ > 0.05:
# self.reverse = True
# self.counter = 0; self.r_sets = {}
# break
if self.reverse:
truth, count = freq_count.most_common()[-1]
else:
truth, count = freq_count.most_common()[0]
prob_correct = float(count) / len(self.r_sets[k])
if truth == self.r1:
if self.count1 > 2000:
prob_k = float(len(self.r_sets[k])) / self.count1
e_ += prob_k * (1 - prob_correct)
else: e_ = 0.0
# a += 2 * prob_k * prob_correct
else:
prob_k = float(len(self.r_sets[k])) / self.count2
e += prob_k * (1 - prob_correct)
# b += 2 * prob_k * prob_correct
# print prob
log_string(str(e_) + " " + str(e))
self.C = np.array([[1-e_, e_], [e, 1-e]])
# if self.counter >= 10000:
# self.counter = 0
# self.r_sets = {}
# print self.C
def smooth_reward(self, state, action, reward):
if self.smooth:
if (state, action) in self.r_smooth:
if len(self.r_smooth[(state, action)]) >= 100:
self.r_smooth[(state, action)].pop(0)
self.r_smooth[(state, action)].append(reward)
return sum(self.r_smooth[(state, action)]) / float(len(self.r_smooth[(state, action)]))
else:
self.r_smooth[(state, action)].append(reward)
else:
self.r_smooth[(state, action)] = [reward]
return reward
def collect(self, state, action, reward):
if (state, action) in self.r_sets:
self.r_sets[(state, action)].append(reward)
else:
self.r_sets[(state, action)] = [reward]
self.counter += 1
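# --- Illustrative sketch (not part of the original script) ---
# Feeds the PostProcessor synthetic noisy rewards for a handful of
# (state, action) pairs and lets estimate_C() recover the flip rates by
# majority voting. The simulated flip rates are e_ = 0.1 on r1 = -1 and
# e = 0.2 on r2 = +1, so the estimated C should be close to
# [[0.9, 0.1], [0.2, 0.8]].
def _postprocessor_demo():
    rng = np.random.RandomState(0)
    post = PostProcessor(smooth=False, surrogate=True)
    true_rewards = {(s, a): (-1 if s % 2 == 0 else 1)
                    for s in range(10) for a in range(2)}
    for _ in range(300):
        for (s, a), r in true_rewards.items():
            flip = rng.rand() < (0.1 if r == -1 else 0.2)
            post.collect(s, a, -r if flip else r)
    post.estimate_C()
    return post.C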
if __name__ == '__main__':
env = gym.make('CartPole-v0')
goal_average_steps = 195
max_number_of_steps = 200
last_time_steps = np.ndarray(0)
n_bins = 8
n_bins_angle = 10
number_of_features = env.observation_space.shape[0]
last_time_steps = np.ndarray(0)
# Number of states is huge so in order to simplify the situation
# we discretize the space to: 10 ** number_of_features
cart_position_bins = pandas.cut([-2.4, 2.4], bins=n_bins, retbins=True)[1][1:-1]
pole_angle_bins = pandas.cut([-2, 2], bins=n_bins_angle, retbins=True)[1][1:-1]
cart_velocity_bins = pandas.cut([-1, 1], bins=n_bins, retbins=True)[1][1:-1]
angle_rate_bins = pandas.cut([-3.5, 3.5], bins=n_bins_angle, retbins=True)[1][1:-1]
# The Q-learn algorithm
qlearn = QLearn(actions=range(env.action_space.n),
alpha=0.5, gamma=0.90, epsilon=0.1)
pre_processor = PreProcessor(normal=False, e_=ERR_N, e=ERR_P)
if ERR_P > 0.5:
if not SMOOTH: post_processor = PostProcessor(smooth=False, surrogate=True, reverse=True)
else: post_processor = PostProcessor(smooth=True, surrogate=True, reverse=True)
else:
if not SMOOTH: post_processor = PostProcessor(smooth=False, surrogate=True)
else: post_processor = PostProcessor(smooth=True, surrogate=True)
steps = 0
while True:
observation = env.reset()
cart_position, pole_angle, cart_velocity, angle_rate_of_change = observation
state = build_state([to_bin(cart_position, cart_position_bins),
to_bin(pole_angle, pole_angle_bins),
to_bin(cart_velocity, cart_velocity_bins),
to_bin(angle_rate_of_change, angle_rate_bins)])
for t in range(max_number_of_steps):
# env.render()
# Pick an action based on the current state
action = qlearn.chooseAction(state)
# Execute the action and get feedback
observation, reward, done, info = env.step(action)
if REWARD == "noisy":
reward = pre_processor.process_reward(reward)
reward = post_processor.smooth_reward(state, action, reward)
elif REWARD == "surrogate":
reward = pre_processor.process_reward(reward)
post_processor.collect(state, action, reward)
reward = post_processor.process_reward(reward)
reward = post_processor.smooth_reward(state, action, reward)
else:
pass
# Digitize the observation to get a state
cart_position, pole_angle, cart_velocity, angle_rate_of_change = observation
nextState = build_state([to_bin(cart_position, cart_position_bins),
to_bin(pole_angle, pole_angle_bins),
to_bin(cart_velocity, cart_velocity_bins),
to_bin(angle_rate_of_change, angle_rate_bins)])
# # If out of bounds
# if (cart_position > 2.4 or cart_position < -2.4):
# reward = -200
# qlearn.learn(state, action, reward, nextState)
# print("Out of bounds, reseting")
# break
if not(done):
qlearn.learn(state, action, reward, nextState)
state = nextState
else:
# Q-learn stuff
reward = -20
qlearn.learn(state, action, reward, nextState)
last_time_steps = np.append(last_time_steps, [int(t + 1)])
# print last_time_steps
break
steps += 1
if steps >= 30000: break
l = last_time_steps.tolist()
if REWARD == "normal":
pandas.DataFrame(l).to_csv(os.path.join(LOG_DIR, "normal.csv"))
elif REWARD == "noisy":
if not SMOOTH:
pandas.DataFrame(l).to_csv(os.path.join(LOG_DIR, "noisy.csv"))
else:
pandas.DataFrame(l).to_csv(os.path.join(LOG_DIR, "noisy_smooth.csv"))
else:
if not SMOOTH:
pandas.DataFrame(l).to_csv(os.path.join(LOG_DIR, "surrogate.csv"))
else:
pandas.DataFrame(l).to_csv(os.path.join(LOG_DIR, "surrogate_smooth.csv"))
# l.sort()
# print("Overall score: {:0.2f}".format(last_time_steps.mean()))
# print("Best 100 score: {:0.2f}".format(reduce(lambda x, y: x + y, l[-100:]) / len(l[-100:])))
# env.monitor.close()
| 13,444 | 34.288714 | 107 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/ddpg_pendulum.py |
import argparse
import pandas
import numpy as np
import os
import gym
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Input, Concatenate
from keras.optimizers import Adam
import tensorflow as tf
from rl.agents import DDPGAgent
from rl.core import Processor
from rl.memory import SequentialMemory
from rl.random import OrnsteinUhlenbeckProcess
from noise_estimator import *  # provides PendulumSurrogateProcessor used below
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir', default='logs',
help='Log dir [default: logs]')
parser.add_argument('--reward', default='normal',
help='reward choice: normal/noisy/surrogate [default: normal]')
parser.add_argument('--weight', type=float, default=0.6,
help='Weight of random confusion matrix [default: 0.6]')
parser.add_argument('--noise_type', type=str, default='norm_all',
help='Type of noise added: norm_all/norm_one/anti_iden/max_one [default: norm_all]')
FLAGS = parser.parse_args()
REWARD = FLAGS.reward
WEIGHT = FLAGS.weight
NOISE_TYPE = FLAGS.noise_type
assert (NOISE_TYPE in ["norm_all", "norm_one", "anti_iden", "max_one"])
if REWARD == "normal":
LOG_DIR = os.path.join(os.path.join(FLAGS.log_dir, "ddpg_pendulum"), NOISE_TYPE)
else:
LOG_DIR = os.path.join(os.path.join(os.path.join(FLAGS.log_dir, "ddpg_pendulum"), NOISE_TYPE), str(WEIGHT))
ENV_NAME = 'Pendulum-v0'
# gym.undo_logger_setup()
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
os.system('cp ddpg_pendulum.py %s' % (LOG_DIR)) # bkp of train procedure
def train():
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
np.random.seed(123)
env.seed(123)
assert len(env.action_space.shape) == 1
nb_actions = env.action_space.shape[0]
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
from keras import backend as K
K.set_session(sess)
# Next, we build a very simple model.
actor = Sequential()
actor.add(Flatten(input_shape=(1,) + env.observation_space.shape))
actor.add(Dense(16))
actor.add(Activation('relu'))
actor.add(Dense(16))
actor.add(Activation('relu'))
actor.add(Dense(16))
actor.add(Activation('relu'))
actor.add(Dense(nb_actions))
actor.add(Activation('linear'))
# print(actor.summary())
action_input = Input(shape=(nb_actions,), name='action_input')
observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
flattened_observation = Flatten()(observation_input)
x = Concatenate()([action_input, flattened_observation])
x = Dense(32)(x)
x = Activation('relu')(x)
x = Dense(32)(x)
x = Activation('relu')(x)
x = Dense(32)(x)
x = Activation('relu')(x)
x = Dense(1)(x)
x = Activation('linear')(x)
critic = Model(inputs=[action_input, observation_input], outputs=x)
# print(critic.summary())
# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=100000, window_length=1)
random_process = OrnsteinUhlenbeckProcess(size=nb_actions, theta=.15, mu=0., sigma=.3)
if REWARD == "normal":
ddpg_normal = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
memory=memory, nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,
random_process=random_process, gamma=.99, target_model_update=1e-3)
ddpg_normal.compile(Adam(lr=.0005, clipnorm=1.), metrics=['mae'])
# Okay, now it's time to learn something! We visualize the training here for show, but this
# slows down training quite a lot. You can always safely abort the training prematurely using
# Ctrl + C.
history_normal = ddpg_normal.fit(env, nb_steps=150000, visualize=False, verbose=2, nb_max_episode_steps=200)
# After training is done, we save the final weights.
ddpg_normal.save_weights(os.path.join(LOG_DIR, 'ddpg_normal_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
# Finally, evaluate our algorithm for 5 episodes.
ddpg_normal.test(env, nb_episodes=5, visualize=False, verbose=2, nb_max_episode_steps=200)
pandas.DataFrame(history_normal.history).to_csv(os.path.join(LOG_DIR, "normal.csv"))
elif REWARD == "noisy":
processor_noisy = PendulumSurrogateProcessor(weight=WEIGHT, surrogate=False, noise_type=NOISE_TYPE)
ddpg_noisy = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
memory=memory, nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,
random_process=random_process, gamma=.99, target_model_update=1e-3,
processor=processor_noisy)
ddpg_noisy.compile(Adam(lr=.0005, clipnorm=1.), metrics=['mae'])
history_noisy = ddpg_noisy.fit(env, nb_steps=150000, visualize=False, verbose=2, nb_max_episode_steps=200)
ddpg_noisy.save_weights(os.path.join(LOG_DIR, 'ddpg_noisy_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
ddpg_noisy.test(env, nb_episodes=5, visualize=False, verbose=2, nb_max_episode_steps=200)
pandas.DataFrame(history_noisy.history).to_csv(os.path.join(LOG_DIR, "noisy.csv"))
elif REWARD == "surrogate":
processor_surrogate = PendulumSurrogateProcessor(weight=WEIGHT, surrogate=True, noise_type=NOISE_TYPE)
ddpg_surrogate = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
memory=memory, nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,
random_process=random_process, gamma=.99, target_model_update=1e-3,
processor=processor_surrogate)
ddpg_surrogate.compile(Adam(lr=.0005, clipnorm=1.), metrics=['mae'])
history_surrogate = ddpg_surrogate.fit(env, nb_steps=150000, visualize=False, verbose=2, nb_max_episode_steps=200)
ddpg_surrogate.save_weights(os.path.join(LOG_DIR, 'ddpg_surrogate_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
ddpg_surrogate.test(env, nb_episodes=5, visualize=False, verbose=2, nb_max_episode_steps=200)
pandas.DataFrame(history_surrogate.history).to_csv(os.path.join(LOG_DIR, "surrogate.csv"))
else:
raise NotImplementedError
if __name__ == "__main__":
train()
| 6,589 | 44.763889 | 124 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-control/noise_estimator.py
|
import collections
import pandas
import numpy as np
from rl.core import Processor
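# Editor's note (assumption, flagged for clarity): log_string(), called by the Pendulum
# processors further below, is neither defined nor imported in this module; it is assumed
# to be provided by the surrounding training script's logging utilities.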
def build_state(features):
return int("".join(map(lambda feature: str(int(feature)), features)))
def to_bin(value, bins):
return np.digitize(x=[value], bins=bins)[0]
def is_invertible(a):
return a.shape[0] == a.shape[1] and np.linalg.matrix_rank(a) == a.shape[0]
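# Hedged illustration (editor's addition, values chosen only for this example): build_state()
# and to_bin() turn a continuous observation into a single integer state id, e.g.
#   bins = pandas.cut([-2.4, 2.4], bins=8, retbins=True)[1][1:-1]
#   build_state([to_bin(0.1, bins), to_bin(-1.3, bins)])   # -> 41, one digit per feature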
def initialize_cmat(noise_type, M, weight):
cmat = None
flag = True
cnt = 0
while flag:
if noise_type == "norm_all":
init_norm = np.random.rand(M, M) # reward: 0 ~ -16
cmat = init_norm / init_norm.sum(axis=1, keepdims=1) * weight + \
(1 - weight) * np.identity(M)
elif noise_type == "norm_one":
i_mat = np.identity(M)
            for row in i_mat:  # shuffle each row in place (a bare map() is a no-op under Python 3)
                np.random.shuffle(row)
cmat = i_mat * weight + (1 - weight) * np.identity(M)
elif noise_type == "anti_iden":
# if weight == 0.5: raise ValueError
cmat = np.identity(M)[::-1] * weight + \
(1 - weight) * np.identity(M)
if weight == 0.5: break
else:
# if weight == 0.5: raise ValueError
            i1_mat = np.zeros((M, M)); i1_mat[0:M//2, -1] = 1; i1_mat[M//2:, 0] = 1
i2_mat = np.zeros((M, M)); i2_mat[0:int(np.ceil(M/2.0)), -1] = 1; i2_mat[int(np.ceil(M/2.0)):, 0] = 1
i_mat = (i1_mat + i2_mat) / 2.0
cmat = i_mat * weight + (1 - weight) * np.identity(M)
if weight == 0.5: break
if is_invertible(cmat):
flag = False
cnt += 1
return cmat, cnt
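# Hedged usage sketch (editor's addition, parameter values are assumptions):
#   cmat, tries = initialize_cmat("anti_iden", M=17, weight=0.3)
#   assert np.allclose(cmat.sum(axis=1), 1.0)   # each row is a probability distribution
# With weight < 0.5 the matrix is diagonally dominant and therefore invertible.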
class CartpoleProcessor(Processor):
"""
Learning from perturbed rewards -- CartPole
step 1 - Estimate the confusion matrices (2 x 2)
step 2 - Calculate the surrogate rewards
"""
def __init__(self, e_=0.1, e=0.3, smooth=False, surrogate=False, epsilon=1e-6):
assert (np.abs(e_ + e - 1) > epsilon)
self.smooth = smooth
self.surrogate = surrogate
self.r_smooth = {}
self.r_sets = {}
self.e_ = e_
self.e = e
self.r1 = -1
self.r2 = 1
self.counter = 0
self.C = np.identity(2)
self.epsilon = epsilon
        self.reverse = (self.e > 0.5)
def noisy_reward(self, reward):
# perturb the true reward
n = np.random.random()
if np.abs(reward - self.r1) < self.epsilon:
if (n < self.e_):
return self.r2
else:
if (n < self.e):
return self.r1
return reward
def smooth_reward(self, state, action, reward):
# variance reduction technique (VRT)
if (state, action) in self.r_smooth:
if len(self.r_smooth[(state, action)]) >= 100:
self.r_smooth[(state, action)].pop(0)
self.r_smooth[(state, action)].append(reward)
return sum(self.r_smooth[(state, action)]) / float(len(self.r_smooth[(state, action)]))
else:
self.r_smooth[(state, action)].append(reward)
else:
self.r_smooth[(state, action)] = [reward]
return reward
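    # Editor's note (hedged): smooth_reward() keeps at most the last 100 noisy rewards seen
    # for each (state, action) pair and returns their running mean, e.g. after observing
    # [1, -1, 1] for one pair it returns (1 - 1 + 1) / 3 ~= 0.33.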
def process_reward(self, reward):
# calculate the surrogate reward
if not self.surrogate:
return reward
self.estimate_C()
self.est_e_ = self.C[0, 1]
self.est_e = self.C[1, 0]
if np.abs(reward - self.r1) < self.epsilon:
r_surrogate = ((1 - self.est_e) * self.r1 - self.est_e_ * self.r2) / (1 - self.est_e_ - self.est_e)
else:
r_surrogate = ((1 - self.est_e_) * self.r2 - self.est_e * self.r1) / (1 - self.est_e_ - self.est_e)
return r_surrogate
def estimate_C(self):
# estimate the confusion matrix via majority voting
if self.counter >= 100 and self.counter % 50 == 0:
e_ = 0; e = 0
self.count1 = 0
self.count2 = 0
for k in self.r_sets.keys():
freq_count = collections.Counter(self.r_sets[k])
if self.reverse:
truth, count = freq_count.most_common()[-1]
else:
truth, count = freq_count.most_common()[0]
if truth == self.r1:
self.count1 += len(self.r_sets[k])
else:
self.count2 += len(self.r_sets[k])
# print (self.count1, self.count2)
for k in self.r_sets.keys():
freq_count = collections.Counter(self.r_sets[k])
if self.reverse:
truth, count = freq_count.most_common()[-1]
else:
truth, count = freq_count.most_common()[0]
prob_correct = float(count) / len(self.r_sets[k])
# print (prob_correct)
if truth == self.r1:
if self.count1 > 800:
prob_k = float(len(self.r_sets[k])) / self.count1
e_ += prob_k * (1 - prob_correct)
else: e_ = 0.0
else:
prob_k = float(len(self.r_sets[k])) / self.count2
e += prob_k * (1 - prob_correct)
self.C = np.array([[1-e_, e_], [e, 1-e]])
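    # Editor's note (hedged): estimate_C() takes the majority label of each (state, action)
    # bucket as the true reward, so the off-diagonal entries of C = [[1-e_, e_], [e, 1-e]]
    # are bucket-size-weighted minority fractions. The estimate is refreshed every 50 steps
    # once at least 100 samples have been collected (see the counter check above).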
def collect(self, state, action, reward):
if (state, action) in self.r_sets:
self.r_sets[(state, action)].append(reward)
else:
self.r_sets[(state, action)] = [reward]
self.counter += 1
def process_action(self, action):
self.action = action
return action
def process_step(self, observation, reward, done, info):
n_bins = 8
n_bins_angle = 10
        # The continuous state space is huge, so to simplify the problem we discretize
        # each observation feature into bins (roughly 10 ** number_of_features states)
cart_position_bins = pandas.cut([-2.4, 2.4], bins=n_bins, retbins=True)[1][1:-1]
pole_angle_bins = pandas.cut([-2, 2], bins=n_bins_angle, retbins=True)[1][1:-1]
cart_velocity_bins = pandas.cut([-1, 1], bins=n_bins, retbins=True)[1][1:-1]
angle_rate_bins = pandas.cut([-3.5, 3.5], bins=n_bins_angle, retbins=True)[1][1:-1]
cart_position, pole_angle, cart_velocity, angle_rate_of_change = observation
state = build_state([to_bin(cart_position, cart_position_bins),
to_bin(pole_angle, pole_angle_bins),
to_bin(cart_velocity, cart_velocity_bins),
to_bin(angle_rate_of_change, angle_rate_bins)])
reward = self.noisy_reward(reward)
self.collect(state, self.action, reward)
reward = self.process_reward(reward)
if self.smooth:
reward = self.smooth_reward(state, self.action, reward)
return observation, reward, done, info
class CartpoleSurrogateProcessor(Processor):
"""
Learning from perturbed reward (confusion matrix is known) -- CartPole
- calculate the surrogate reward directly
"""
def __init__(self, e_=0.0, e=0.2, surrogate=False, epsilon=1e-6):
assert (e_ + e < 1.0)
self.e_ = e_
self.e = e
self.surrogate = surrogate
        self.epsilon = epsilon
def noisy_reward(self, reward):
n = np.random.random()
if np.abs(reward - 1.0) < self.epsilon:
if (n < self.e):
return -1 * reward
else:
if (n < self.e_):
return -1 * reward
return reward
def process_reward(self, reward):
r = self.noisy_reward(reward)
if not self.surrogate:
return r
if np.abs(r - 1.0) < self.epsilon:
r_surrogate = ((1 - self.e_) * r + self.e * r) / (1 - self.e_ - self.e)
else:
r_surrogate = ((1 - self.e) * r + self.e_ * r) / (1 - self.e_ - self.e)
return r_surrogate
class PendulumProcessor(Processor):
"""
Learning from perturbed rewards -- Pendulum
step 1 - Estimate the confusion matrices (17 x 17)
step 2 - Calculate the surrogate rewards
"""
def __init__(self, weight=0.2, surrogate=False, noise_type="norm_one", epsilon=1e-6):
self.r_sets = {}
self.weight = weight
self.surrogate = surrogate
self.M = 17
self.cmat, _ = initialize_cmat(noise_type, self.M, self.weight)
# assert (is_invertible(self.cmat))
self.cummat = np.cumsum(self.cmat, axis=1)
self.mmat = np.expand_dims(np.asarray(range(0, -1 * self.M, -1)), axis=1)
self.r_sum = 0
self.r_counter = 0
self.counter = 0
self.C = np.identity(self.M)
self.epsilon = epsilon
        self.reverse = (self.weight > 0.5)
self.valid = False
def noisy_reward(self, reward):
prob_list = list(self.cummat[abs(reward), :])
n = np.random.random()
prob_list.append(n)
j = sorted(prob_list).index(n)
reward = -1 * j
return reward
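    # Editor's note (hedged): noisy_reward() samples the perturbed reward by inverse-CDF
    # sampling: a uniform draw n is inserted into the cumulative row cummat[|reward|, :]
    # and its sorted position j selects the observed reward -j.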
def process_reward(self, reward):
if not self.surrogate:
return reward
self.estimate_C()
if self.valid:
return self.phi[int(-reward), 0]
else: return reward
def estimate_C(self):
if self.counter >= 1000 and self.counter % 100 == 0:
self.C = np.zeros((self.M, self.M))
self.count = [0] * self.M
for k in self.r_sets.keys():
freq_count = collections.Counter(self.r_sets[k])
if self.reverse:
truth, count = freq_count.most_common()[-1]
else:
truth, count = freq_count.most_common()[0]
self.count[int(-truth)] += len(self.r_sets[k])
print (self.count)
for k in self.r_sets.keys():
freq_count = collections.Counter(self.r_sets[k])
list_freq = freq_count.most_common()
if self.reverse:
list_freq = sorted(list_freq, reverse=True)
truth, count = list_freq[0]
# if self.first_time[int(-truth)]:
# self.C[int(-truth), int(-truth)] = 0
# self.first_time[int(-truth)] = False
# print (prob_correct)
for pred, count in list_freq:
self.C[int(-truth), int(-pred)] += float(count) / self.count[int(-truth)]
diag = np.diag(self.C)
anti_diag = np.diag(np.fliplr(self.C))
log_string("diag: " + np.array2string(diag, formatter={'float_kind':lambda x: "%.5f" % x}))
log_string("anti_diag:" + np.array2string(anti_diag, formatter={'float_kind':lambda x: "%.5f" % x}))
log_string("sum: " + np.array2string(np.sum(self.C, axis=1), formatter={'float_kind':lambda x: "%.2f" % x}))
if is_invertible(self.C):
self.phi = np.linalg.inv(self.C).dot(self.mmat)
self.valid = True
else: self.valid = False
def collect(self, state, action, reward):
        if (state, action) in self.r_sets:
self.r_sets[(state, action)].append(reward)
else:
self.r_sets[(state, action)] = [reward]
self.counter += 1
def process_action(self, action):
# print ("action before:", action)
n_bins = 20
action_bins = pandas.cut([-1.0, 1.0], bins=n_bins, retbins=True)[1][1:-1]
self.action = build_state([to_bin(action, action_bins)])
# print ("action after:", self.action)
return action
def process_step(self, observation, reward, done, info):
n_bins = 20
n_bins_dot = 20
        # The continuous state space is huge, so to simplify the problem we discretize
        # each observation feature into bins (roughly 10 ** number_of_features states)
cos_theta_bins = pandas.cut([-1.0, 1.0], bins=n_bins, retbins=True)[1][1:-1]
sin_theta_bins = pandas.cut([-1.0, 1.0], bins=n_bins, retbins=True)[1][1:-1]
theta_dot_bins = pandas.cut([-8.0, 8.0], bins=n_bins_dot, retbins=True)[1][1:-1]
cos_theta, sin_theta, theta_dot = observation
state = build_state([to_bin(cos_theta, cos_theta_bins),
to_bin(sin_theta, sin_theta_bins),
to_bin(theta_dot, theta_dot_bins)])
self.r_sum += reward
self.r_counter += 1
if self.r_counter == 200:
# log_string(str(self.r_sum / float(self.r_counter)))
self.r_counter = 0
self.r_sum = 0
reward = int(np.ceil(reward))
reward = self.noisy_reward(reward)
self.collect(state, self.action, reward)
reward = self.process_reward(reward)
return observation, reward, done, info
class PendulumSurrogateProcessor(Processor):
"""
Learning from perturbed reward (confusion matrix is known) -- Pendulum
- calculate the surrogate reward directly
"""
def __init__(self, weight=0.6, surrogate=False, noise_type="norm_all"):
M = 17
self.weight = weight
self.surrogate = surrogate
self.cmat, _ = initialize_cmat(noise_type, M, self.weight)
# assert (is_invertible(self.cmat))
self.cummat = np.cumsum(self.cmat, axis=1)
# print self.cummat
self.mmat = np.expand_dims(np.asarray(range(0, -1* M, -1)), axis=1)
print (self.cmat.T.shape, self.mmat.shape)
self.phi = np.linalg.inv(self.cmat).dot(self.mmat)
print (self.phi.shape)
self.r_sum = 0
self.r_counter = 0
def noisy_reward(self, reward):
prob_list = list(self.cummat[abs(reward), :])
n = np.random.random()
prob_list.append(n)
j = sorted(prob_list).index(n)
reward = -1 * j
return reward
def process_reward(self, reward):
self.r_sum += reward
self.r_counter += 1
if self.r_counter == 200:
log_string(str(self.r_sum / float(self.r_counter)))
self.r_counter = 0
self.r_sum = 0
reward = int(np.ceil(reward))
r = self.noisy_reward(reward)
if self.surrogate:
return self.phi[int(-r), 0] / 100.0
return r / 100.0
class NAFPendulumProcessor(Processor):
def process_reward(self, reward):
# The magnitude of the reward can be important. Since each step yields a relatively
# high reward, we reduce the magnitude by two orders.
return reward / 100.
| 14,761 | 33.816038 | 120 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-control/cem_cartpole.py
|
import argparse
import collections
import pandas
import numpy as np
import os
import gym
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
import tensorflow as tf
from rl.agents.cem import CEMAgent
from rl.memory import EpisodeParameterMemory
from noise_estimator import CartpoleProcessor, CartpoleSurrogateProcessor
from utils import *
parser = argparse.ArgumentParser()
parser.add_argument('--error_positive', type=float, default=0.2,
help='Error positive rate [default: 0.2]')
parser.add_argument('--error_negative', type=float, default=0.0,
help='Error negative rate [default: 0.0]')
parser.add_argument('--log_dir', default='logs',
help='Log dir [default: logs]')
parser.add_argument('--reward', default='normal',
help='reward choice: normal/noisy/surrogate [default: normal]')
parser.add_argument('--smooth', type=str2bool, default=False,
help='Add smoothing to rewards [default: False]')
FLAGS = parser.parse_args()
ERR_P = FLAGS.error_positive
ERR_N = FLAGS.error_negative
REWARD = FLAGS.reward
SMOOTH = FLAGS.smooth
if REWARD == "normal":
LOG_DIR = os.path.join(FLAGS.log_dir, "cem_cartpole")
else:
LOG_DIR = os.path.join(os.path.join(FLAGS.log_dir, "cem_cartpole"), str(ERR_P))
ENV_NAME = 'CartPole-v0'
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
os.system('cp cem_cartpole.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'setting.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
def train():
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n
obs_dim = env.observation_space.shape[0]
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
from keras import backend as K
K.set_session(sess)
# Option 1 : Simple model
# model = Sequential()
# model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
# model.add(Dense(nb_actions))
# model.add(Activation('softmax'))
# Option 2: deep network
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('softmax'))
model.summary()
# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = EpisodeParameterMemory(limit=1000, window_length=1)
if REWARD == "normal":
cem = CEMAgent(model=model, nb_actions=nb_actions, memory=memory,
batch_size=50, nb_steps_warmup=2000, train_interval=50, elite_frac=0.05)
cem.compile()
history_normal = cem.fit(env, nb_steps=100000, visualize=False, verbose=2)
cem.save_weights(os.path.join(LOG_DIR, 'cem_normal_{}_params.h5f'.format(ENV_NAME)), overwrite=True)
cem.test(env, nb_episodes=5, visualize=False)
pandas.DataFrame(history_normal.history).to_csv(os.path.join(LOG_DIR, "normal.csv"))
elif REWARD == "noisy":
if not SMOOTH:
processor_noisy = CartpoleProcessor(e_=ERR_N, e=ERR_P, smooth=False, surrogate=False)
else:
processor_noisy = CartpoleProcessor(e_=ERR_N, e=ERR_P, smooth=True, surrogate=False)
# processor_surrogate = CartpoleSurrogateProcessor(e_=ERR_N, e=ERR_P, surrogate=False)
cem = CEMAgent(model=model, nb_actions=nb_actions, memory=memory,
batch_size=50, nb_steps_warmup=2000, train_interval=50, elite_frac=0.05,
processor=processor_noisy)
cem.compile()
history_noisy = cem.fit(env, nb_steps=100000, visualize=False, verbose=2)
if not SMOOTH:
cem.save_weights(os.path.join(LOG_DIR, 'cem_noisy_{}_params.h5f'.format(ENV_NAME)), overwrite=True)
pandas.DataFrame(history_noisy.history).to_csv(os.path.join(LOG_DIR, "noisy.csv"))
else:
cem.save_weights(os.path.join(LOG_DIR, 'cem_noisy_smooth_{}_params.h5f'.format(ENV_NAME)), overwrite=True)
pandas.DataFrame(history_noisy.history).to_csv(os.path.join(LOG_DIR, "noisy_smooth.csv"))
cem.test(env, nb_episodes=5, visualize=False)
elif REWARD == "surrogate":
if not SMOOTH:
processor_surrogate = CartpoleProcessor(e_=ERR_N, e=ERR_P, smooth=False, surrogate=True)
else:
processor_surrogate = CartpoleProcessor(e_=ERR_N, e=ERR_P, smooth=True, surrogate=True)
# processor_surrogate = CartpoleSurrogateProcessor(e_=ERR_N, e=ERR_P, surrogate=True)
cem = CEMAgent(model=model, nb_actions=nb_actions, memory=memory,
batch_size=50, nb_steps_warmup=2000, train_interval=50, elite_frac=0.05,
processor=processor_surrogate)
cem.compile()
history_surrogate = cem.fit(env, nb_steps=100000, visualize=False, verbose=2)
if not SMOOTH:
cem.save_weights(os.path.join(LOG_DIR, 'cem_surrogate_{}_params.h5f'.format(ENV_NAME)), overwrite=True)
pandas.DataFrame(history_surrogate.history).to_csv(os.path.join(LOG_DIR, "surrogate.csv"))
else:
cem.save_weights(os.path.join(LOG_DIR, 'cem_surrogate_smooth_{}_params.h5f'.format(ENV_NAME)), overwrite=True)
pandas.DataFrame(history_surrogate.history).to_csv(os.path.join(LOG_DIR, "surrogate_smooth.csv"))
cem.test(env, nb_episodes=5, visualize=False)
else:
raise NotImplementedError
if __name__ == "__main__":
train()
| 5,860 | 39.42069 | 122 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-control/plot.py
|
import argparse
import pandas
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from utils import str2bool  # boolean CLI parsing helper shared with the training scripts
sns.set()
sns.set_color_codes()
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir', type=str, default="logs/dqn_cartpole",
                    help='The path of log directory [default: logs/dqn_cartpole]')
parser.add_argument('--all', type=str2bool, default=False,
help='Plot all the curves (diff errs) [default: False]')
parser.add_argument('--weight', type=float, default=0.2,
help='Weight of noise [default: 0.2]')
FLAGS = parser.parse_args()
LOG_DIR = FLAGS.log_dir
WEIGHT = FLAGS.weight
def smooth(y, weight=0.6):
last = y[0]
smoothed = []
for point in y:
smoothed_val = last * weight + (1 - weight) * point
smoothed.append(smoothed_val)
last = smoothed_val
return smoothed
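# Editor's worked example (hedged): smooth() is an exponential moving average with smoothing
# factor `weight`, e.g. smooth([0, 10, 10], weight=0.6) -> [0, 4.0, 6.4] (up to float rounding).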
def plot_qlearn_cartpole_all():
history_normal = pandas.read_csv(os.path.join(LOG_DIR, "normal.csv"))['0']
plt.plot(smooth(list(history_normal)), linewidth=1.5, c=sns.color_palette()[0], label="normal")
plt.plot(list(history_normal), alpha=0.4, linewidth=0.8, c=sns.color_palette()[0])
cnt = 0
for err in [0.2, 0.4, 0.6, 0.8]:
history_noisy = pandas.read_csv(os.path.join(os.path.join(LOG_DIR, str(err)), "noisy.csv"))['0']
history_surrogate = pandas.read_csv(os.path.join(os.path.join(LOG_DIR, str(err)), "surrogate.csv"))['0']
plt.plot(smooth(list(history_noisy)), linewidth=1.5, c=sns.color_palette()[cnt+1], label="noisy (" + str(err) + ")")
plt.plot(list(history_noisy), alpha=0.4, linewidth=0.8, c=sns.color_palette()[cnt+1])
plt.plot(smooth(list(history_surrogate)), linewidth=1.5, c=sns.color_palette()[cnt+2], label="surrogate (" + str(err) + ")")
plt.plot(list(history_surrogate), alpha=0.4, linewidth=0.8, c=sns.color_palette()[cnt+2])
cnt += 2
plt.ylabel('steps per episode')
plt.xlabel('episode')
plt.title('CartPole-v0 (steps)')
plt.legend(loc='best')
plt.savefig(os.path.join(LOG_DIR, "CartPole-v0-reward-all (Q-Learning).png"))
def plot_qlearn_cartpole(weight=0.2):
history_normal = pandas.read_csv(os.path.join(LOG_DIR, "normal.csv"))['0']
history_noisy = pandas.read_csv(os.path.join(os.path.join(LOG_DIR, str(weight)), "noisy.csv"))['0']
history_surrogate = pandas.read_csv(os.path.join(os.path.join(LOG_DIR, str(weight)), "surrogate.csv"))['0']
plt.plot(smooth(list(history_normal)), linewidth=1.5, c=sns.color_palette()[0])
plt.plot(smooth(list(history_noisy)), linewidth=1.5, c=sns.color_palette()[1])
plt.plot(smooth(list(history_surrogate)), linewidth=1.5, c=sns.color_palette()[2])
plt.plot(list(history_normal), alpha=0.4, linewidth=0.8, c=sns.color_palette()[0])
plt.plot(list(history_noisy), alpha=0.4, linewidth=0.8, c=sns.color_palette()[1])
plt.plot(list(history_surrogate), alpha=0.4, linewidth=0.8, c=sns.color_palette()[2])
plt.ylabel('steps per episode')
plt.xlabel('episode')
plt.title('CartPole-v0 (steps-' + str(weight) + ")")
plt.legend(['normal', 'noisy', 'surrogate'], loc='best')
# plt.show()
plt.savefig(os.path.join(os.path.join(LOG_DIR, str(weight)), "CartPole-v0-steps-" + str(weight) + " (Q-Learning).png"))
def plot_dqn_cartpole_all():
history_normal = pandas.read_csv(os.path.join(LOG_DIR, "normal.csv"))
plt.plot(smooth(list(history_normal['nb_episode_steps'])), linewidth=1.5, c=sns.color_palette()[0], label="normal")
plt.plot(list(history_normal['nb_episode_steps']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[0])
cnt = 0
for err in [0.2, 0.4, 0.5]:
history_noisy = pandas.read_csv(os.path.join(os.path.join(LOG_DIR, str(err)), "noisy.csv"))
history_surrogate = pandas.read_csv(os.path.join(os.path.join(LOG_DIR, str(err)), "surrogate.csv"))
plt.plot(smooth(list(history_noisy['nb_episode_steps'])), linewidth=1.5, c=sns.color_palette()[cnt+1], label="noisy (" + str(err) + ")")
plt.plot(list(history_noisy['nb_episode_steps']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[cnt+1])
plt.plot(smooth(list(history_surrogate['nb_episode_steps'])), linewidth=1.5, c=sns.color_palette()[cnt+2], label="surrogate (" + str(err) + ")")
plt.plot(list(history_surrogate['nb_episode_steps']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[cnt+2])
cnt += 2
plt.ylabel('steps per episode')
plt.xlabel('episode')
plt.title('CartPole-v0 (steps)')
plt.legend(loc='best')
plt.savefig(os.path.join(LOG_DIR, "CartPole-v0-reward-all (DQN).png"))
def plot_dqn_cartpole(weight=0.2):
history_normal = pandas.read_csv(os.path.join(LOG_DIR, "normal.csv"))
history_noisy = pandas.read_csv(os.path.join(os.path.join(LOG_DIR, str(weight)), "noisy.csv"))
history_surrogate = pandas.read_csv(os.path.join(os.path.join(LOG_DIR, str(weight)), "surrogate.csv"))
plt.plot(smooth(list(history_normal['nb_episode_steps'])), linewidth=1.5, c=sns.color_palette()[0])
plt.plot(smooth(list(history_noisy['nb_episode_steps'])), linewidth=1.5, c=sns.color_palette()[1])
plt.plot(smooth(list(history_surrogate['nb_episode_steps'])), linewidth=1.5, c=sns.color_palette()[2])
plt.plot(list(history_normal['nb_episode_steps']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[0])
plt.plot(list(history_noisy['nb_episode_steps']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[1])
plt.plot(list(history_surrogate['nb_episode_steps']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[2])
plt.ylabel('steps per episode')
plt.xlabel('episode')
plt.title('CartPole-v0 (steps-' + str(weight) + ")")
plt.legend(['normal', 'noisy', 'surrogate'], loc='best')
# plt.show()
plt.savefig(os.path.join(os.path.join(LOG_DIR, str(weight)), "CartPole-v0-steps-" + str(weight) + " (DQN).png"))
plt.clf()
plt.plot(smooth(list(history_normal['episode_reward'])), linewidth=1.5, c=sns.color_palette()[0])
plt.plot(smooth(list(history_noisy['episode_reward'])), linewidth=1.5, c=sns.color_palette()[1])
plt.plot(smooth(list(history_surrogate['episode_reward'])), linewidth=1.5, c=sns.color_palette()[2])
plt.plot(list(history_normal['episode_reward']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[0])
plt.plot(list(history_noisy['episode_reward']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[1])
plt.plot(list(history_surrogate['episode_reward']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[2])
plt.ylabel('reward per episode')
plt.xlabel('episode')
plt.title('CartPole-v0 (reward-' + str(weight) + ")")
plt.legend(['normal', 'noisy', 'surrogate'], loc='upper right')
# plt.show()
plt.savefig(os.path.join(os.path.join(LOG_DIR, str(weight)), "CartPole-v0-reward-" + str(weight) + " (DQN).png"))
def plot_sarsa_cartpole_all():
history_normal = pandas.read_csv(os.path.join(LOG_DIR, "normal.csv"))
plt.plot(smooth(list(history_normal['nb_episode_steps'])), linewidth=1.5, c=sns.color_palette()[0], label="normal")
plt.plot(list(history_normal['nb_episode_steps']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[0])
cnt = 0
for err in [0.2, 0.4, 0.5]:
history_noisy = pandas.read_csv(os.path.join(os.path.join(LOG_DIR, str(err)), "noisy.csv"))
history_surrogate = pandas.read_csv(os.path.join(os.path.join(LOG_DIR, str(err)), "surrogate.csv"))
plt.plot(smooth(list(history_noisy['nb_episode_steps'])), linewidth=1.5, c=sns.color_palette()[cnt+1], label="noisy (" + str(err) + ")")
plt.plot(list(history_noisy['nb_episode_steps']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[cnt+1])
plt.plot(smooth(list(history_surrogate['nb_episode_steps'])), linewidth=1.5, c=sns.color_palette()[cnt+2], label="surrogate (" + str(err) + ")")
plt.plot(list(history_surrogate['nb_episode_steps']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[cnt+2])
cnt += 2
plt.ylabel('steps per episode')
plt.xlabel('episode')
plt.title('CartPole-v0 (steps)')
plt.legend(loc='best')
plt.savefig(os.path.join(LOG_DIR, "CartPole-v0-steps-all (SARSA).png"))
def plot_sarsa_cartpole(weight=0.2):
history_normal = pandas.read_csv(os.path.join(LOG_DIR, "normal.csv"))
history_noisy = pandas.read_csv(os.path.join(os.path.join(LOG_DIR, str(weight)), "noisy.csv"))
history_surrogate = pandas.read_csv(os.path.join(os.path.join(LOG_DIR, str(weight)), "surrogate.csv"))
plt.plot(smooth(list(history_normal['nb_episode_steps'])), linewidth=1.5, c=sns.color_palette()[0])
plt.plot(smooth(list(history_noisy['nb_episode_steps'])), linewidth=1.5, c=sns.color_palette()[1])
plt.plot(smooth(list(history_surrogate['nb_episode_steps'])), linewidth=1.5, c=sns.color_palette()[2])
plt.plot(list(history_normal['nb_episode_steps']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[0])
plt.plot(list(history_noisy['nb_episode_steps']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[1])
plt.plot(list(history_surrogate['nb_episode_steps']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[2])
plt.ylabel('steps per episode')
plt.xlabel('episode')
plt.title('CartPole-v0 (steps-' + str(weight) + ")")
plt.legend(['normal', 'noisy', 'surrogate'], loc='best')
# plt.show()
plt.savefig(os.path.join(os.path.join(LOG_DIR, str(weight)), "CartPole-v0-steps-" + str(weight) + " (SARSA).png"))
plt.clf()
plt.plot(smooth(list(history_normal['episode_reward'])), linewidth=1.5, c=sns.color_palette()[0])
plt.plot(smooth(list(history_noisy['episode_reward'])), linewidth=1.5, c=sns.color_palette()[1])
plt.plot(smooth(list(history_surrogate['episode_reward'])), linewidth=1.5, c=sns.color_palette()[2])
plt.plot(list(history_normal['episode_reward']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[0])
plt.plot(list(history_noisy['episode_reward']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[1])
plt.plot(list(history_surrogate['episode_reward']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[2])
plt.ylabel('reward per episode')
plt.xlabel('episode')
plt.title('CartPole-v0 (reward-' + str(weight) + ")")
plt.legend(['normal', 'noisy', 'surrogate'], loc='upper right')
# plt.show()
plt.savefig(os.path.join(os.path.join(LOG_DIR, str(weight)), "CartPole-v0-reward-" + str(weight) + " (SARSA).png"))
def plot_cem_cartpole_all():
history_normal = pandas.read_csv(os.path.join(LOG_DIR, "normal.csv"))
plt.plot(smooth(list(history_normal['nb_episode_steps'])), linewidth=1.5, c=sns.color_palette()[0], label="normal")
plt.plot(list(history_normal['nb_episode_steps']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[0])
cnt = 0
for err in [0.2, 0.4, 0.5]:
history_noisy = pandas.read_csv(os.path.join(os.path.join(LOG_DIR, str(err)), "noisy.csv"))
history_surrogate = pandas.read_csv(os.path.join(os.path.join(LOG_DIR, str(err)), "surrogate.csv"))
plt.plot(smooth(list(history_noisy['nb_episode_steps'])), linewidth=1.5, c=sns.color_palette()[cnt+1], label="noisy (" + str(err) + ")")
plt.plot(list(history_noisy['nb_episode_steps']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[cnt+1])
plt.plot(smooth(list(history_surrogate['nb_episode_steps'])), linewidth=1.5, c=sns.color_palette()[cnt+2], label="surrogate (" + str(err) + ")")
plt.plot(list(history_surrogate['nb_episode_steps']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[cnt+2])
cnt += 2
plt.ylabel('steps per episode')
plt.xlabel('episode')
plt.title('CartPole-v0 (steps)')
plt.legend(loc='best')
plt.savefig(os.path.join(LOG_DIR, "CartPole-v0-reward-all (CEM).png"))
def plot_cem_cartpole(weight=0.2):
history_normal = pandas.read_csv(os.path.join(LOG_DIR, "normal.csv"))
history_noisy = pandas.read_csv(os.path.join(os.path.join(LOG_DIR, str(weight)), "noisy.csv"))
history_surrogate = pandas.read_csv(os.path.join(os.path.join(LOG_DIR, str(weight)), "surrogate.csv"))
plt.plot(smooth(list(history_normal['nb_episode_steps'])), linewidth=1.5, c=sns.color_palette()[0])
plt.plot(smooth(list(history_noisy['nb_episode_steps'])), linewidth=1.5, c=sns.color_palette()[1])
plt.plot(smooth(list(history_surrogate['nb_episode_steps'])), linewidth=1.5, c=sns.color_palette()[2])
plt.plot(list(history_normal['nb_episode_steps']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[0])
plt.plot(list(history_noisy['nb_episode_steps']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[1])
plt.plot(list(history_surrogate['nb_episode_steps']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[2])
plt.ylabel('steps per episode')
plt.xlabel('episode')
plt.title('CartPole-v0 (steps-' + str(weight) + ")")
plt.legend(['normal', 'noisy', 'surrogate'], loc='best')
# plt.show()
plt.savefig(os.path.join(os.path.join(LOG_DIR, str(weight)), "CartPole-v0-steps-" + str(weight) + " (CEM).png"))
plt.clf()
plt.plot(smooth(list(history_normal['episode_reward'])), linewidth=1.5, c=sns.color_palette()[0])
plt.plot(smooth(list(history_noisy['episode_reward'])), linewidth=1.5, c=sns.color_palette()[1])
plt.plot(smooth(list(history_surrogate['episode_reward'])), linewidth=1.5, c=sns.color_palette()[2])
plt.plot(list(history_normal['episode_reward']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[0])
plt.plot(list(history_noisy['episode_reward']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[1])
plt.plot(list(history_surrogate['episode_reward']), alpha=0.4, linewidth=0.8, c=sns.color_palette()[2])
plt.ylabel('reward per episode')
plt.xlabel('episode')
plt.title('CartPole-v0 (reward-' + str(weight) + ")")
plt.legend(['normal', 'noisy', 'surrogate'], loc='upper right')
# plt.show()
plt.savefig(os.path.join(os.path.join(LOG_DIR, str(weight)), "CartPole-v0-reward-" + str(weight) + " (CEM).png"))
def plot_ddpg_pendulum_all():
history_normal = pandas.read_csv(os.path.join(LOG_DIR, "normal.csv"))
plt.plot(smooth(list(history_normal['episode_reward'] / 200.0)), linewidth=1.5, c=sns.color_palette()[0], label="normal")
plt.plot(list(history_normal['episode_reward'] / 200.0), alpha=0.4, linewidth=0.8, c=sns.color_palette()[0])
cnt = 0
for err in [0.2, 0.4, 0.5]:
reward_noisy = list(np.loadtxt(os.path.join(os.path.join(LOG_DIR, str(err)), "noisy_reward")))
reward_surrogate = list(np.loadtxt(os.path.join(os.path.join(LOG_DIR, str(err)), "surrogate_reward")))
plt.plot(smooth(reward_noisy), linewidth=1.5, c=sns.color_palette()[cnt+1], label="noisy (" + str(err) + ")")
plt.plot(reward_noisy, alpha=0.4, linewidth=0.8, c=sns.color_palette()[cnt+1])
plt.plot(smooth(reward_surrogate), linewidth=1.5, c=sns.color_palette()[cnt+2], label="surrogate (" + str(err) + ")")
plt.plot(reward_surrogate, alpha=0.4, linewidth=0.8, c=sns.color_palette()[cnt+2])
cnt += 2
plt.ylabel('reward per episode')
plt.xlabel('episode')
plt.title('Pendulum-v0 (reward)')
plt.legend(loc='best')
# plt.show()
plt.savefig(os.path.join(LOG_DIR, "Pendulum-v0-reward-all (DDPG).png"))
def plot_ddpg_pendulum(weight=0.2):
history_normal = pandas.read_csv(os.path.join(LOG_DIR, "normal.csv"))
plt.plot(smooth(list(history_normal['episode_reward'] / 200.0)), linewidth=1.5, c=sns.color_palette()[0], label="normal")
plt.plot(list(history_normal['episode_reward'] / 200.0), alpha=0.4, linewidth=0.8, c=sns.color_palette()[0])
reward_noisy = list(np.loadtxt(os.path.join(os.path.join(LOG_DIR, str(weight)), "noisy_reward")))
reward_surrogate = list(np.loadtxt(os.path.join(os.path.join(LOG_DIR, str(weight)), "surrogate_reward")))
plt.plot(smooth(reward_noisy), linewidth=1.5, c=sns.color_palette()[1], label="noisy")
plt.plot(reward_noisy, alpha=0.4, linewidth=0.8, c=sns.color_palette()[1])
plt.plot(smooth(reward_surrogate), linewidth=1.5, c=sns.color_palette()[2], label="surrogate")
plt.plot(reward_surrogate, alpha=0.4, linewidth=0.8, c=sns.color_palette()[2])
plt.ylabel('reward per episode')
plt.xlabel('episode')
plt.title('Pendulum-v0 (reward-' + str(weight) + ")")
plt.legend(loc='best')
# plt.show()
plt.savefig(os.path.join(os.path.join(LOG_DIR, str(weight)), "Pendulum-v0-reward-" + str(weight) + " (DDPG).png"))
def plot_naf_pendulum_all():
history_normal = pandas.read_csv(os.path.join(LOG_DIR, "normal.csv"))
plt.plot(smooth(list(history_normal['episode_reward'] / 2.0)), linewidth=1.5, c=sns.color_palette()[0], label="normal")
plt.plot(list(history_normal['episode_reward'] / 2.0), alpha=0.4, linewidth=0.8, c=sns.color_palette()[0])
cnt = 0
for err in [0.2, 0.4, 0.5]:
reward_noisy = list(np.loadtxt(os.path.join(os.path.join(LOG_DIR, str(err)), "noisy_reward")))
reward_surrogate = list(np.loadtxt(os.path.join(os.path.join(LOG_DIR, str(err)), "surrogate_reward")))
plt.plot(smooth(reward_noisy), linewidth=1.5, c=sns.color_palette()[cnt+1], label="noisy (" + str(err) + ")")
plt.plot(reward_noisy, alpha=0.4, linewidth=0.8, c=sns.color_palette()[cnt+1])
plt.plot(smooth(reward_surrogate), linewidth=1.5, c=sns.color_palette()[cnt+2], label="surrogate (" + str(err) + ")")
plt.plot(reward_surrogate, alpha=0.4, linewidth=0.8, c=sns.color_palette()[cnt+2])
cnt += 2
plt.ylabel('reward per episode')
plt.xlabel('episode')
plt.title('Pendulum-v0 (reward)')
plt.legend(loc='best')
# plt.show()
plt.savefig(os.path.join(LOG_DIR, "Pendulum-v0-reward-all (NAF).png"))
def plot_naf_pendulum(weight=0.2):
history_normal = pandas.read_csv(os.path.join(LOG_DIR, "normal.csv"))
plt.plot(smooth(list(history_normal['episode_reward'] / 2.0)), linewidth=1.5, c=sns.color_palette()[0], label="normal")
plt.plot(list(history_normal['episode_reward'] / 2.0), alpha=0.4, linewidth=0.8, c=sns.color_palette()[0])
reward_noisy = list(np.loadtxt(os.path.join(os.path.join(LOG_DIR, str(weight)), "noisy_reward")))
reward_surrogate = list(np.loadtxt(os.path.join(os.path.join(LOG_DIR, str(weight)), "surrogate_reward")))
plt.plot(smooth(reward_noisy), linewidth=1.5, c=sns.color_palette()[1], label="noisy")
plt.plot(reward_noisy, alpha=0.4, linewidth=0.8, c=sns.color_palette()[1])
plt.plot(smooth(reward_surrogate), linewidth=1.5, c=sns.color_palette()[2], label="surrogate")
plt.plot(reward_surrogate, alpha=0.4, linewidth=0.8, c=sns.color_palette()[2])
plt.ylabel('reward per episode')
plt.xlabel('episode')
plt.title('Pendulum-v0 (reward-' + str(weight) + ")")
plt.legend(loc='best')
# plt.show()
plt.savefig(os.path.join(os.path.join(LOG_DIR, str(weight)), "Pendulum-v0-reward-" + str(weight) + " (NAF).png"))
def plot():
if "qlearn" in LOG_DIR and "cartpole" in LOG_DIR:
plot_qlearn_cartpole(weight=WEIGHT)
elif "dqn" in LOG_DIR and "cartpole" in LOG_DIR:
plot_dqn_cartpole(weight=WEIGHT)
elif "sarsa" in LOG_DIR and "cartpole" in LOG_DIR:
plot_sarsa_cartpole(weight=WEIGHT)
elif "cem" in LOG_DIR and "cartpole" in LOG_DIR:
plot_cem_cartpole(weight=WEIGHT)
elif "ddpg" in LOG_DIR and "pendulum" in LOG_DIR:
plot_ddpg_pendulum(weight=WEIGHT)
elif "naf" in LOG_DIR and "pendulum" in LOG_DIR:
plot_naf_pendulum(weight=WEIGHT)
else:
raise NotImplementedError
def plot_all():
if "qlearn" in LOG_DIR and "cartpole" in LOG_DIR:
plot_qlearn_cartpole_all()
elif "dqn" in LOG_DIR and "cartpole" in LOG_DIR:
plot_dqn_cartpole_all()
elif "sarsa" in LOG_DIR and "cartpole" in LOG_DIR:
plot_sarsa_cartpole_all()
elif "cem" in LOG_DIR and "cartpole" in LOG_DIR:
plot_cem_cartpole_all()
elif "ddpg" in LOG_DIR and "pendulum" in LOG_DIR:
plot_ddpg_pendulum_all()
elif "naf" in LOG_DIR and "pendulum" in LOG_DIR:
plot_naf_pendulum_all()
else:
raise NotImplementedError
if __name__ == "__main__":
if FLAGS.all:
plot_all()
else:
plot()
| 20,248 | 53.727027 | 152 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-control/rl/callbacks.py
|
from __future__ import division
from __future__ import print_function
import warnings
import timeit
import json
from tempfile import mkdtemp
import numpy as np
from keras import __version__ as KERAS_VERSION
from keras.callbacks import Callback as KerasCallback, CallbackList as KerasCallbackList
from keras.utils.generic_utils import Progbar
class Callback(KerasCallback):
def _set_env(self, env):
self.env = env
def on_episode_begin(self, episode, logs={}):
"""Called at beginning of each episode"""
pass
def on_episode_end(self, episode, logs={}):
"""Called at end of each episode"""
pass
def on_step_begin(self, step, logs={}):
"""Called at beginning of each step"""
pass
def on_step_end(self, step, logs={}):
"""Called at end of each step"""
pass
def on_action_begin(self, action, logs={}):
"""Called at beginning of each action"""
pass
def on_action_end(self, action, logs={}):
"""Called at end of each action"""
pass
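    # Editor's sketch (hedged, class name is illustrative): a custom callback only overrides
    # the hooks it needs, e.g.
    #   class EpisodeRewardPrinter(Callback):
    #       def on_episode_end(self, episode, logs={}):
    #           print('episode', episode, 'reward', logs.get('episode_reward'))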
class CallbackList(KerasCallbackList):
def _set_env(self, env):
""" Set environment for each callback in callbackList """
for callback in self.callbacks:
if callable(getattr(callback, '_set_env', None)):
callback._set_env(env)
def on_episode_begin(self, episode, logs={}):
""" Called at beginning of each episode for each callback in callbackList"""
for callback in self.callbacks:
# Check if callback supports the more appropriate `on_episode_begin` callback.
# If not, fall back to `on_epoch_begin` to be compatible with built-in Keras callbacks.
if callable(getattr(callback, 'on_episode_begin', None)):
callback.on_episode_begin(episode, logs=logs)
else:
callback.on_epoch_begin(episode, logs=logs)
def on_episode_end(self, episode, logs={}):
""" Called at end of each episode for each callback in callbackList"""
for callback in self.callbacks:
# Check if callback supports the more appropriate `on_episode_end` callback.
# If not, fall back to `on_epoch_end` to be compatible with built-in Keras callbacks.
if callable(getattr(callback, 'on_episode_end', None)):
callback.on_episode_end(episode, logs=logs)
else:
callback.on_epoch_end(episode, logs=logs)
def on_step_begin(self, step, logs={}):
""" Called at beginning of each step for each callback in callbackList"""
for callback in self.callbacks:
# Check if callback supports the more appropriate `on_step_begin` callback.
# If not, fall back to `on_batch_begin` to be compatible with built-in Keras callbacks.
if callable(getattr(callback, 'on_step_begin', None)):
callback.on_step_begin(step, logs=logs)
else:
callback.on_batch_begin(step, logs=logs)
def on_step_end(self, step, logs={}):
""" Called at end of each step for each callback in callbackList"""
for callback in self.callbacks:
# Check if callback supports the more appropriate `on_step_end` callback.
# If not, fall back to `on_batch_end` to be compatible with built-in Keras callbacks.
if callable(getattr(callback, 'on_step_end', None)):
callback.on_step_end(step, logs=logs)
else:
callback.on_batch_end(step, logs=logs)
def on_action_begin(self, action, logs={}):
""" Called at beginning of each action for each callback in callbackList"""
for callback in self.callbacks:
if callable(getattr(callback, 'on_action_begin', None)):
callback.on_action_begin(action, logs=logs)
def on_action_end(self, action, logs={}):
""" Called at end of each action for each callback in callbackList"""
for callback in self.callbacks:
if callable(getattr(callback, 'on_action_end', None)):
callback.on_action_end(action, logs=logs)
class TestLogger(Callback):
""" Logger Class for Test """
def on_train_begin(self, logs):
""" Print logs at beginning of training"""
print('Testing for {} episodes ...'.format(self.params['nb_episodes']))
def on_episode_end(self, episode, logs):
""" Print logs at end of each episode """
template = 'Episode {0}: reward: {1:.3f}, steps: {2}'
variables = [
episode + 1,
logs['episode_reward'],
logs['nb_steps'],
]
print(template.format(*variables))
class TrainEpisodeLogger(Callback):
def __init__(self):
# Some algorithms compute multiple episodes at once since they are multi-threaded.
# We therefore use a dictionary that is indexed by the episode to separate episodes
# from each other.
self.episode_start = {}
self.observations = {}
self.rewards = {}
self.actions = {}
self.metrics = {}
self.step = 0
def on_train_begin(self, logs):
""" Print training values at beginning of training """
self.train_start = timeit.default_timer()
self.metrics_names = self.model.metrics_names
print('Training for {} steps ...'.format(self.params['nb_steps']))
def on_train_end(self, logs):
""" Print training time at end of training """
duration = timeit.default_timer() - self.train_start
print('done, took {:.3f} seconds'.format(duration))
def on_episode_begin(self, episode, logs):
""" Reset environment variables at beginning of each episode """
self.episode_start[episode] = timeit.default_timer()
self.observations[episode] = []
self.rewards[episode] = []
self.actions[episode] = []
self.metrics[episode] = []
def on_episode_end(self, episode, logs):
""" Compute and print training statistics of the episode when done """
duration = timeit.default_timer() - self.episode_start[episode]
episode_steps = len(self.observations[episode])
# Format all metrics.
metrics = np.array(self.metrics[episode])
metrics_template = ''
metrics_variables = []
with warnings.catch_warnings():
warnings.filterwarnings('error')
for idx, name in enumerate(self.metrics_names):
if idx > 0:
metrics_template += ', '
try:
value = np.nanmean(metrics[:, idx])
metrics_template += '{}: {:f}'
except Warning:
value = '--'
metrics_template += '{}: {}'
metrics_variables += [name, value]
metrics_text = metrics_template.format(*metrics_variables)
nb_step_digits = str(int(np.ceil(np.log10(self.params['nb_steps']))) + 1)
template = '{step: ' + nb_step_digits + 'd}/{nb_steps}: episode: {episode}, duration: {duration:.3f}s, episode steps: {episode_steps}, steps per second: {sps:.0f}, episode reward: {episode_reward:.3f}, mean reward: {reward_mean:.3f} [{reward_min:.3f}, {reward_max:.3f}], mean action: {action_mean:.3f} [{action_min:.3f}, {action_max:.3f}], mean observation: {obs_mean:.3f} [{obs_min:.3f}, {obs_max:.3f}], {metrics}'
variables = {
'step': self.step,
'nb_steps': self.params['nb_steps'],
'episode': episode + 1,
'duration': duration,
'episode_steps': episode_steps,
'sps': float(episode_steps) / duration,
'episode_reward': np.sum(self.rewards[episode]),
'reward_mean': np.mean(self.rewards[episode]),
'reward_min': np.min(self.rewards[episode]),
'reward_max': np.max(self.rewards[episode]),
'action_mean': np.mean(self.actions[episode]),
'action_min': np.min(self.actions[episode]),
'action_max': np.max(self.actions[episode]),
'obs_mean': np.mean(self.observations[episode]),
'obs_min': np.min(self.observations[episode]),
'obs_max': np.max(self.observations[episode]),
'metrics': metrics_text,
}
print(template.format(**variables))
# Free up resources.
del self.episode_start[episode]
del self.observations[episode]
del self.rewards[episode]
del self.actions[episode]
del self.metrics[episode]
def on_step_end(self, step, logs):
""" Update statistics of episode after each step """
episode = logs['episode']
self.observations[episode].append(logs['observation'])
self.rewards[episode].append(logs['reward'])
self.actions[episode].append(logs['action'])
self.metrics[episode].append(logs['metrics'])
self.step += 1
class TrainIntervalLogger(Callback):
def __init__(self, interval=10000):
self.interval = interval
self.step = 0
self.reset()
def reset(self):
""" Reset statistics """
self.interval_start = timeit.default_timer()
self.progbar = Progbar(target=self.interval)
self.metrics = []
self.infos = []
self.info_names = None
self.episode_rewards = []
def on_train_begin(self, logs):
""" Initialize training statistics at beginning of training """
self.train_start = timeit.default_timer()
self.metrics_names = self.model.metrics_names
print('Training for {} steps ...'.format(self.params['nb_steps']))
def on_train_end(self, logs):
""" Print training duration at end of training """
duration = timeit.default_timer() - self.train_start
print('done, took {:.3f} seconds'.format(duration))
def on_step_begin(self, step, logs):
""" Print metrics if interval is over """
if self.step % self.interval == 0:
if len(self.episode_rewards) > 0:
metrics = np.array(self.metrics)
assert metrics.shape == (self.interval, len(self.metrics_names))
formatted_metrics = ''
if not np.isnan(metrics).all(): # not all values are means
means = np.nanmean(self.metrics, axis=0)
assert means.shape == (len(self.metrics_names),)
for name, mean in zip(self.metrics_names, means):
formatted_metrics += ' - {}: {:.3f}'.format(name, mean)
formatted_infos = ''
if len(self.infos) > 0:
infos = np.array(self.infos)
if not np.isnan(infos).all(): # not all values are means
means = np.nanmean(self.infos, axis=0)
assert means.shape == (len(self.info_names),)
for name, mean in zip(self.info_names, means):
formatted_infos += ' - {}: {:.3f}'.format(name, mean)
print('{} episodes - episode_reward: {:.3f} [{:.3f}, {:.3f}]{}{}'.format(len(self.episode_rewards), np.mean(self.episode_rewards), np.min(self.episode_rewards), np.max(self.episode_rewards), formatted_metrics, formatted_infos))
print('')
self.reset()
print('Interval {} ({} steps performed)'.format(self.step // self.interval + 1, self.step))
def on_step_end(self, step, logs):
""" Update progression bar at the end of each step """
if self.info_names is None:
self.info_names = logs['info'].keys()
values = [('reward', logs['reward'])]
if KERAS_VERSION > '2.1.3':
self.progbar.update((self.step % self.interval) + 1, values=values)
else:
self.progbar.update((self.step % self.interval) + 1, values=values, force=True)
self.step += 1
self.metrics.append(logs['metrics'])
if len(self.info_names) > 0:
self.infos.append([logs['info'][k] for k in self.info_names])
def on_episode_end(self, episode, logs):
""" Update reward value at the end of each episode """
self.episode_rewards.append(logs['episode_reward'])
class FileLogger(Callback):
def __init__(self, filepath, interval=None):
self.filepath = filepath
self.interval = interval
# Some algorithms compute multiple episodes at once since they are multi-threaded.
# We therefore use a dict that maps from episode to metrics array.
self.metrics = {}
self.starts = {}
self.data = {}
def on_train_begin(self, logs):
""" Initialize model metrics before training """
self.metrics_names = self.model.metrics_names
def on_train_end(self, logs):
""" Save model at the end of training """
self.save_data()
def on_episode_begin(self, episode, logs):
""" Initialize metrics at the beginning of each episode """
assert episode not in self.metrics
assert episode not in self.starts
self.metrics[episode] = []
self.starts[episode] = timeit.default_timer()
def on_episode_end(self, episode, logs):
""" Compute and print metrics at the end of each episode """
duration = timeit.default_timer() - self.starts[episode]
metrics = self.metrics[episode]
if np.isnan(metrics).all():
mean_metrics = np.array([np.nan for _ in self.metrics_names])
else:
mean_metrics = np.nanmean(metrics, axis=0)
assert len(mean_metrics) == len(self.metrics_names)
data = list(zip(self.metrics_names, mean_metrics))
data += list(logs.items())
data += [('episode', episode), ('duration', duration)]
for key, value in data:
if key not in self.data:
self.data[key] = []
self.data[key].append(value)
if self.interval is not None and episode % self.interval == 0:
self.save_data()
# Clean up.
del self.metrics[episode]
del self.starts[episode]
def on_step_end(self, step, logs):
""" Append metric at the end of each step """
self.metrics[logs['episode']].append(logs['metrics'])
def save_data(self):
""" Save metrics in a json file """
if len(self.data.keys()) == 0:
return
# Sort everything by episode.
assert 'episode' in self.data
sorted_indexes = np.argsort(self.data['episode'])
sorted_data = {}
for key, values in self.data.items():
assert len(self.data[key]) == len(sorted_indexes)
# We convert to np.array() and then to list to convert from np datatypes to native datatypes.
# This is necessary because json.dump cannot handle np.float32, for example.
sorted_data[key] = np.array([self.data[key][idx] for idx in sorted_indexes]).tolist()
# Overwrite already open file. We can simply seek to the beginning since the file will
# grow strictly monotonously.
with open(self.filepath, 'w') as f:
json.dump(sorted_data, f)
class Visualizer(Callback):
def on_action_end(self, action, logs):
""" Render environment at the end of each action """
self.env.render(mode='human')
class ModelIntervalCheckpoint(Callback):
def __init__(self, filepath, interval, verbose=0):
super(ModelIntervalCheckpoint, self).__init__()
self.filepath = filepath
self.interval = interval
self.verbose = verbose
self.total_steps = 0
def on_step_end(self, step, logs={}):
""" Save weights at interval steps during training """
self.total_steps += 1
if self.total_steps % self.interval != 0:
# Nothing to do.
return
filepath = self.filepath.format(step=self.total_steps, **logs)
if self.verbose > 0:
print('Step {}: saving model to {}'.format(self.total_steps, filepath))
self.model.save_weights(filepath, overwrite=True)
| 16,229 | 40.829897 | 423 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-control/rl/core.py
|
# -*- coding: utf-8 -*-
import warnings
from copy import deepcopy
import numpy as np
from keras.callbacks import History
from rl.callbacks import (
CallbackList,
TestLogger,
TrainEpisodeLogger,
TrainIntervalLogger,
Visualizer
)
class Agent(object):
"""Abstract base class for all implemented agents.
Each agent interacts with the environment (as defined by the `Env` class) by first observing the
state of the environment. Based on this observation the agent changes the environment by performing
an action.
Do not use this abstract base class directly but instead use one of the concrete agents implemented.
Each agent realizes a reinforcement learning algorithm. Since all agents conform to the same
interface, you can use them interchangeably.
To implement your own agent, you have to implement the following methods:
- `forward`
- `backward`
- `compile`
- `load_weights`
- `save_weights`
- `layers`
# Arguments
processor (`Processor` instance): See [Processor](#processor) for details.
"""
def __init__(self, processor=None):
self.processor = processor
self.training = False
self.step = 0
def get_config(self):
"""Configuration of the agent for serialization.
# Returns
            Dictionary with agent configuration
"""
return {}
def fit(self, env, nb_steps, action_repetition=1, callbacks=None, verbose=1,
visualize=False, nb_max_start_steps=0, start_step_policy=None, log_interval=10000,
nb_max_episode_steps=None):
"""Trains the agent on the given environment.
# Arguments
env: (`Env` instance): Environment that the agent interacts with. See [Env](#env) for details.
nb_steps (integer): Number of training steps to be performed.
action_repetition (integer): Number of times the agent repeats the same action without
observing the environment again. Setting this to a value > 1 can be useful
if a single action only has a very small effect on the environment.
callbacks (list of `keras.callbacks.Callback` or `rl.callbacks.Callback` instances):
List of callbacks to apply during training. See [callbacks](/callbacks) for details.
verbose (integer): 0 for no logging, 1 for interval logging (compare `log_interval`), 2 for episode logging
visualize (boolean): If `True`, the environment is visualized during training. However,
this is likely going to slow down training significantly and is thus intended to be
a debugging instrument.
nb_max_start_steps (integer): Number of maximum steps that the agent performs at the beginning
of each episode using `start_step_policy`. Notice that this is an upper limit since
the exact number of steps to be performed is sampled uniformly from [0, max_start_steps]
at the beginning of each episode.
start_step_policy (`lambda observation: action`): The policy
to follow if `nb_max_start_steps` > 0. If set to `None`, a random action is performed.
log_interval (integer): If `verbose` = 1, the number of steps that are considered to be an interval.
nb_max_episode_steps (integer): Number of steps per episode that the agent performs before
automatically resetting the environment. Set to `None` if each episode should run
(potentially indefinitely) until the environment signals a terminal state.
# Returns
A `keras.callbacks.History` instance that recorded the entire training process.
"""
if not self.compiled:
            raise RuntimeError('You tried to fit your agent but it hasn\'t been compiled yet. Please call `compile()` before `fit()`.')
if action_repetition < 1:
raise ValueError('action_repetition must be >= 1, is {}'.format(action_repetition))
self.training = True
callbacks = [] if not callbacks else callbacks[:]
if verbose == 1:
callbacks += [TrainIntervalLogger(interval=log_interval)]
elif verbose > 1:
callbacks += [TrainEpisodeLogger()]
if visualize:
callbacks += [Visualizer()]
history = History()
callbacks += [history]
callbacks = CallbackList(callbacks)
if hasattr(callbacks, 'set_model'):
callbacks.set_model(self)
else:
callbacks._set_model(self)
callbacks._set_env(env)
params = {
'nb_steps': nb_steps,
}
if hasattr(callbacks, 'set_params'):
callbacks.set_params(params)
else:
callbacks._set_params(params)
self._on_train_begin()
callbacks.on_train_begin()
episode = np.int16(0)
self.step = np.int16(0)
observation = None
episode_reward = None
episode_step = None
did_abort = False
try:
while self.step < nb_steps:
if observation is None: # start of a new episode
callbacks.on_episode_begin(episode)
episode_step = np.int16(0)
episode_reward = np.float32(0)
# Obtain the initial observation by resetting the environment.
self.reset_states()
observation = deepcopy(env.reset())
if self.processor is not None:
observation = self.processor.process_observation(observation)
assert observation is not None
# Perform random starts at beginning of episode and do not record them into the experience.
# This slightly changes the start position between games.
nb_random_start_steps = 0 if nb_max_start_steps == 0 else np.random.randint(nb_max_start_steps)
for _ in range(nb_random_start_steps):
if start_step_policy is None:
action = env.action_space.sample()
else:
action = start_step_policy(observation)
if self.processor is not None:
action = self.processor.process_action(action)
callbacks.on_action_begin(action)
observation, reward, done, info = env.step(action)
observation = deepcopy(observation)
if self.processor is not None:
observation, reward, done, info = self.processor.process_step(observation, reward, done, info)
callbacks.on_action_end(action)
if done:
warnings.warn('Env ended before {} random steps could be performed at the start. You should probably lower the `nb_max_start_steps` parameter.'.format(nb_random_start_steps))
observation = deepcopy(env.reset())
if self.processor is not None:
observation = self.processor.process_observation(observation)
break
# At this point, we expect to be fully initialized.
assert episode_reward is not None
assert episode_step is not None
assert observation is not None
# Run a single step.
callbacks.on_step_begin(episode_step)
                # This is where all of the work happens. We first perceive and compute the action
# (forward step) and then use the reward to improve (backward step).
action = self.forward(observation)
if self.processor is not None:
action = self.processor.process_action(action)
reward = np.float32(0)
accumulated_info = {}
done = False
for _ in range(action_repetition):
callbacks.on_action_begin(action)
observation, r, done, info = env.step(action)
# print (r, done)
observation = deepcopy(observation)
if self.processor is not None:
observation, r, done, info = self.processor.process_step(observation, r, done, info)
for key, value in info.items():
if not np.isreal(value):
continue
if key not in accumulated_info:
accumulated_info[key] = np.zeros_like(value)
accumulated_info[key] += value
callbacks.on_action_end(action)
reward += r
if done:
break
if nb_max_episode_steps and episode_step >= nb_max_episode_steps - 1:
# Force a terminal state.
done = True
metrics = self.backward(reward, terminal=done)
episode_reward += reward
step_logs = {
'action': action,
'observation': observation,
'reward': reward,
'metrics': metrics,
'episode': episode,
'info': accumulated_info,
}
callbacks.on_step_end(episode_step, step_logs)
episode_step += 1
self.step += 1
if done:
# We are in a terminal state but the agent hasn't yet seen it. We therefore
# perform one more forward-backward call and simply ignore the action before
# resetting the environment. We need to pass in `terminal=False` here since
# the *next* state, that is the state of the newly reset environment, is
# always non-terminal by convention.
self.forward(observation)
self.backward(0., terminal=False)
# This episode is finished, report and reset.
episode_logs = {
'episode_reward': episode_reward,
'nb_episode_steps': episode_step,
'nb_steps': self.step,
}
callbacks.on_episode_end(episode, episode_logs)
episode += 1
observation = None
episode_step = None
episode_reward = None
except KeyboardInterrupt:
            # We catch keyboard interrupts here so that training can be safely aborted.
# This is so common that we've built this right into this function, which ensures that
# the `on_train_end` method is properly called.
did_abort = True
callbacks.on_train_end(logs={'did_abort': did_abort})
self._on_train_end()
return history
def test(self, env, nb_episodes=1, action_repetition=1, callbacks=None, visualize=True,
nb_max_episode_steps=None, nb_max_start_steps=0, start_step_policy=None, verbose=1):
"""Callback that is called before training begins.
# Arguments
env: (`Env` instance): Environment that the agent interacts with. See [Env](#env) for details.
nb_episodes (integer): Number of episodes to perform.
action_repetition (integer): Number of times the agent repeats the same action without
observing the environment again. Setting this to a value > 1 can be useful
if a single action only has a very small effect on the environment.
            callbacks (list of `keras.callbacks.Callback` or `rl.callbacks.Callback` instances):
                List of callbacks to apply during testing. See [callbacks](/callbacks) for details.
            verbose (integer): 0 for no logging, 1 for episode logging.
            visualize (boolean): If `True`, the environment is visualized during testing. However,
                this is likely going to slow down testing significantly and is thus intended to be
                a debugging instrument.
nb_max_start_steps (integer): Number of maximum steps that the agent performs at the beginning
of each episode using `start_step_policy`. Notice that this is an upper limit since
                the exact number of steps to be performed is sampled uniformly from [0, nb_max_start_steps]
at the beginning of each episode.
start_step_policy (`lambda observation: action`): The policy
to follow if `nb_max_start_steps` > 0. If set to `None`, a random action is performed.
nb_max_episode_steps (integer): Number of steps per episode that the agent performs before
automatically resetting the environment. Set to `None` if each episode should run
(potentially indefinitely) until the environment signals a terminal state.
# Returns
            A `keras.callbacks.History` instance that recorded the entire test process.
"""
if not self.compiled:
            raise RuntimeError('You tried to test your agent but it hasn\'t been compiled yet. Please call `compile()` before `test()`.')
if action_repetition < 1:
raise ValueError('action_repetition must be >= 1, is {}'.format(action_repetition))
self.training = False
self.step = 0
callbacks = [] if not callbacks else callbacks[:]
if verbose >= 1:
callbacks += [TestLogger()]
if visualize:
callbacks += [Visualizer()]
history = History()
callbacks += [history]
callbacks = CallbackList(callbacks)
if hasattr(callbacks, 'set_model'):
callbacks.set_model(self)
else:
callbacks._set_model(self)
callbacks._set_env(env)
params = {
'nb_episodes': nb_episodes,
}
if hasattr(callbacks, 'set_params'):
callbacks.set_params(params)
else:
callbacks._set_params(params)
self._on_test_begin()
callbacks.on_train_begin()
for episode in range(nb_episodes):
callbacks.on_episode_begin(episode)
episode_reward = 0.
episode_step = 0
# Obtain the initial observation by resetting the environment.
self.reset_states()
observation = deepcopy(env.reset())
if self.processor is not None:
observation = self.processor.process_observation(observation)
assert observation is not None
# Perform random starts at beginning of episode and do not record them into the experience.
# This slightly changes the start position between games.
nb_random_start_steps = 0 if nb_max_start_steps == 0 else np.random.randint(nb_max_start_steps)
for _ in range(nb_random_start_steps):
if start_step_policy is None:
action = env.action_space.sample()
else:
action = start_step_policy(observation)
if self.processor is not None:
action = self.processor.process_action(action)
callbacks.on_action_begin(action)
observation, r, done, info = env.step(action)
observation = deepcopy(observation)
if self.processor is not None:
observation, r, done, info = self.processor.process_step(observation, r, done, info)
callbacks.on_action_end(action)
if done:
warnings.warn('Env ended before {} random steps could be performed at the start. You should probably lower the `nb_max_start_steps` parameter.'.format(nb_random_start_steps))
observation = deepcopy(env.reset())
if self.processor is not None:
observation = self.processor.process_observation(observation)
break
# Run the episode until we're done.
done = False
while not done:
callbacks.on_step_begin(episode_step)
action = self.forward(observation)
if self.processor is not None:
action = self.processor.process_action(action)
reward = 0.
accumulated_info = {}
for _ in range(action_repetition):
callbacks.on_action_begin(action)
observation, r, d, info = env.step(action)
observation = deepcopy(observation)
if self.processor is not None:
observation, r, d, info = self.processor.process_step(observation, r, d, info)
callbacks.on_action_end(action)
reward += r
for key, value in info.items():
if not np.isreal(value):
continue
if key not in accumulated_info:
accumulated_info[key] = np.zeros_like(value)
accumulated_info[key] += value
if d:
done = True
break
if nb_max_episode_steps and episode_step >= nb_max_episode_steps - 1:
done = True
self.backward(reward, terminal=done)
episode_reward += reward
step_logs = {
'action': action,
'observation': observation,
'reward': reward,
'episode': episode,
'info': accumulated_info,
}
callbacks.on_step_end(episode_step, step_logs)
episode_step += 1
self.step += 1
# We are in a terminal state but the agent hasn't yet seen it. We therefore
# perform one more forward-backward call and simply ignore the action before
# resetting the environment. We need to pass in `terminal=False` here since
# the *next* state, that is the state of the newly reset environment, is
# always non-terminal by convention.
self.forward(observation)
self.backward(0., terminal=False)
# Report end of episode.
episode_logs = {
'episode_reward': episode_reward,
'nb_steps': episode_step,
}
callbacks.on_episode_end(episode, episode_logs)
callbacks.on_train_end()
self._on_test_end()
return history
def reset_states(self):
"""Resets all internally kept states after an episode is completed.
"""
pass
def forward(self, observation):
"""Takes the an observation from the environment and returns the action to be taken next.
If the policy is implemented by a neural network, this corresponds to a forward (inference) pass.
# Argument
observation (object): The current observation from the environment.
# Returns
The next action to be executed in the environment.
"""
raise NotImplementedError()
def backward(self, reward, terminal):
"""Updates the agent after having executed the action returned by `forward`.
If the policy is implemented by a neural network, this corresponds to a weight update using back-prop.
# Argument
reward (float): The observed reward after executing the action returned by `forward`.
terminal (boolean): `True` if the new state of the environment is terminal.
# Returns
List of metrics values
"""
raise NotImplementedError()
def compile(self, optimizer, metrics=[]):
"""Compiles an agent and the underlaying models to be used for training and testing.
# Arguments
optimizer (`keras.optimizers.Optimizer` instance): The optimizer to be used during training.
metrics (list of functions `lambda y_true, y_pred: metric`): The metrics to run during training.
"""
raise NotImplementedError()
def load_weights(self, filepath):
"""Loads the weights of an agent from an HDF5 file.
# Arguments
filepath (str): The path to the HDF5 file.
"""
raise NotImplementedError()
def save_weights(self, filepath, overwrite=False):
"""Saves the weights of an agent as an HDF5 file.
# Arguments
filepath (str): The path to where the weights should be saved.
overwrite (boolean): If `False` and `filepath` already exists, raises an error.
"""
raise NotImplementedError()
@property
def layers(self):
"""Returns all layers of the underlying model(s).
If the concrete implementation uses multiple internal models,
this method returns them in a concatenated list.
# Returns
A list of the model's layers
"""
raise NotImplementedError()
@property
def metrics_names(self):
"""The human-readable names of the agent's metrics. Must return as many names as there
are metrics (see also `compile`).
# Returns
A list of metric's names (string)
"""
return []
def _on_train_begin(self):
"""Callback that is called before training begins."
"""
pass
def _on_train_end(self):
"""Callback that is called after training ends."
"""
pass
def _on_test_begin(self):
"""Callback that is called before testing begins."
"""
pass
def _on_test_end(self):
"""Callback that is called after testing ends."
"""
pass
class Processor(object):
"""Abstract base class for implementing processors.
A processor acts as a coupling mechanism between an `Agent` and its `Env`. This can
be necessary if your agent has different requirements with respect to the form of the
observations, actions, and rewards of the environment. By implementing a custom processor,
    you can effectively translate between the two without having to change the underlying
implementation of the agent or environment.
Do not use this abstract base class directly but instead use one of the concrete implementations
or write your own.
"""
def process_step(self, observation, reward, done, info):
"""Processes an entire step by applying the processor to the observation, reward, and info arguments.
# Arguments
observation (object): An observation as obtained by the environment.
reward (float): A reward as obtained by the environment.
done (boolean): `True` if the environment is in a terminal state, `False` otherwise.
info (dict): The debug info dictionary as obtained by the environment.
# Returns
            The tuple (observation, reward, done, info) with all elements processed.
"""
observation = self.process_observation(observation)
reward = self.process_reward(reward)
info = self.process_info(info)
return observation, reward, done, info
def process_observation(self, observation):
"""Processes the observation as obtained from the environment for use in an agent and
returns it.
# Arguments
observation (object): An observation as obtained by the environment
# Returns
            The processed observation
"""
return observation
def process_reward(self, reward):
"""Processes the reward as obtained from the environment for use in an agent and
returns it.
# Arguments
reward (float): A reward as obtained by the environment
# Returns
            The processed reward
"""
return reward
def process_info(self, info):
"""Processes the info as obtained from the environment for use in an agent and
returns it.
# Arguments
info (dict): An info as obtained by the environment
# Returns
            The processed info
"""
return info
def process_action(self, action):
"""Processes an action predicted by an agent but before execution in an environment.
# Arguments
action (int): Action given to the environment
# Returns
Processed action given to the environment
"""
return action
def process_state_batch(self, batch):
"""Processes an entire batch of states and returns it.
# Arguments
batch (list): List of states
# Returns
Processed list of states
"""
return batch
@property
def metrics(self):
"""The metrics of the processor, which will be reported during training.
# Returns
List of `lambda y_true, y_pred: metric` functions.
"""
return []
@property
def metrics_names(self):
"""The human-readable names of the agent's metrics. Must return as many names as there
are metrics (see also `compile`).
"""
return []
# Note: the API of the `Env` and `Space` classes are taken from the OpenAI Gym implementation.
# https://github.com/openai/gym/blob/master/gym/core.py
class Env(object):
"""The abstract environment class that is used by all agents. This class has the exact
same API that OpenAI Gym uses so that integrating with it is trivial. In contrast to the
OpenAI Gym implementation, this class only defines the abstract methods without any actual
implementation.
To implement your own environment, you need to define the following methods:
- `step`
- `reset`
- `render`
- `close`
Refer to the [Gym documentation](https://gym.openai.com/docs/#environments).
"""
reward_range = (-np.inf, np.inf)
action_space = None
observation_space = None
def step(self, action):
"""Run one timestep of the environment's dynamics.
Accepts an action and returns a tuple (observation, reward, done, info).
# Arguments
            action (object): An action provided by the agent.
# Returns
observation (object): Agent's observation of the current environment.
reward (float) : Amount of reward returned after previous action.
done (boolean): Whether the episode has ended, in which case further step() calls will return undefined results.
info (dict): Contains auxiliary diagnostic information (helpful for debugging, and sometimes learning).
"""
raise NotImplementedError()
def reset(self):
"""
Resets the state of the environment and returns an initial observation.
# Returns
observation (object): The initial observation of the space. Initial reward is assumed to be 0.
"""
raise NotImplementedError()
def render(self, mode='human', close=False):
"""Renders the environment.
The set of supported modes varies per environment. (And some
environments do not support rendering at all.)
# Arguments
mode (str): The mode to render with.
close (bool): Close all open renderings.
"""
raise NotImplementedError()
def close(self):
"""Override in your subclass to perform any necessary cleanup.
Environments will automatically close() themselves when
garbage collected or when the program exits.
"""
raise NotImplementedError()
def seed(self, seed=None):
"""Sets the seed for this env's random number generator(s).
# Returns
Returns the list of seeds used in this env's random number generators
"""
raise NotImplementedError()
def configure(self, *args, **kwargs):
"""Provides runtime configuration to the environment.
This configuration should consist of data that tells your
environment how to run (such as an address of a remote server,
or path to your ImageNet data). It should not affect the
semantics of the environment.
"""
raise NotImplementedError()
def __del__(self):
self.close()
def __str__(self):
return '<{} instance>'.format(type(self).__name__)
class Space(object):
"""Abstract model for a space that is used for the state and action spaces. This class has the
exact same API that OpenAI Gym uses so that integrating with it is trivial.
Please refer to [Gym Documentation](https://gym.openai.com/docs/#spaces)
"""
def sample(self, seed=None):
"""Uniformly randomly sample a random element of this space.
"""
raise NotImplementedError()
def contains(self, x):
"""Return boolean specifying if x is a valid member of this space
"""
raise NotImplementedError()
| 29,790 | 41.018336 | 202 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-control/rl/memory.py
|
from __future__ import absolute_import
from collections import deque, namedtuple
import warnings
import random
import numpy as np
# This is to be understood as a transition: Given `state0`, performing `action`
# yields `reward` and results in `state1`, which might be `terminal`.
Experience = namedtuple('Experience', 'state0, action, reward, state1, terminal1')
def sample_batch_indexes(low, high, size):
"""Return a sample of (size) unique elements between low and high
# Argument
low (int): The minimum value for our samples
high (int): The maximum value for our samples
size (int): The number of samples to pick
# Returns
A list of samples of length size, with values between low and high
"""
if high - low >= size:
# We have enough data. Draw without replacement, that is each index is unique in the
# batch. We cannot use `np.random.choice` here because it is horribly inefficient as
# the memory grows. See https://github.com/numpy/numpy/issues/2764 for a discussion.
# `random.sample` does the same thing (drawing without replacement) and is way faster.
try:
r = xrange(low, high)
except NameError:
r = range(low, high)
batch_idxs = random.sample(r, size)
else:
# Not enough data. Help ourselves with sampling from the range, but the same index
# can occur multiple times. This is not good and should be avoided by picking a
# large enough warm-up phase.
warnings.warn('Not enough entries to sample without replacement. Consider increasing your warm-up phase to avoid oversampling!')
        batch_idxs = np.random.randint(low, high, size=size)
assert len(batch_idxs) == size
return batch_idxs
class RingBuffer(object):
def __init__(self, maxlen):
self.maxlen = maxlen
self.start = 0
self.length = 0
self.data = [None for _ in range(maxlen)]
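    # How the circular indexing works, for intuition: with maxlen=3, appending
    # 'a', 'b', 'c', 'd' keeps only the three newest items, so buf[0] == 'b',
    # buf[1] == 'c' and buf[2] == 'd'; the physical slot of logical index `idx`
    # is (start + idx) % maxlen.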
def __len__(self):
return self.length
def __getitem__(self, idx):
"""Return element of buffer at specific index
# Argument
idx (int): Index wanted
# Returns
The element of buffer at given index
"""
if idx < 0 or idx >= self.length:
raise KeyError()
return self.data[(self.start + idx) % self.maxlen]
def append(self, v):
"""Append an element to the buffer
# Argument
v (object): Element to append
"""
if self.length < self.maxlen:
# We have space, simply increase the length.
self.length += 1
elif self.length == self.maxlen:
# No space, "remove" the first item.
self.start = (self.start + 1) % self.maxlen
else:
# This should never happen.
raise RuntimeError()
self.data[(self.start + self.length - 1) % self.maxlen] = v
def zeroed_observation(observation):
"""Return an array of zeros with same shape as given observation
# Argument
observation (list): List of observation
# Return
A np.ndarray of zeros with observation.shape
"""
if hasattr(observation, 'shape'):
return np.zeros(observation.shape)
elif hasattr(observation, '__iter__'):
out = []
for x in observation:
out.append(zeroed_observation(x))
return out
else:
return 0.
class Memory(object):
def __init__(self, window_length, ignore_episode_boundaries=False):
self.window_length = window_length
self.ignore_episode_boundaries = ignore_episode_boundaries
self.recent_observations = deque(maxlen=window_length)
self.recent_terminals = deque(maxlen=window_length)
def sample(self, batch_size, batch_idxs=None):
raise NotImplementedError()
def append(self, observation, action, reward, terminal, training=True):
self.recent_observations.append(observation)
self.recent_terminals.append(terminal)
def get_recent_state(self, current_observation):
"""Return list of last observations
# Argument
current_observation (object): Last observation
# Returns
A list of the last observations
"""
# This code is slightly complicated by the fact that subsequent observations might be
# from different episodes. We ensure that an experience never spans multiple episodes.
# This is probably not that important in practice but it seems cleaner.
state = [current_observation]
idx = len(self.recent_observations) - 1
for offset in range(0, self.window_length - 1):
current_idx = idx - offset
current_terminal = self.recent_terminals[current_idx - 1] if current_idx - 1 >= 0 else False
if current_idx < 0 or (not self.ignore_episode_boundaries and current_terminal):
# The previously handled observation was terminal, don't add the current one.
# Otherwise we would leak into a different episode.
break
state.insert(0, self.recent_observations[current_idx])
while len(state) < self.window_length:
state.insert(0, zeroed_observation(state[0]))
return state
def get_config(self):
"""Return configuration (window_length, ignore_episode_boundaries) for Memory
# Return
A dict with keys window_length and ignore_episode_boundaries
"""
config = {
'window_length': self.window_length,
'ignore_episode_boundaries': self.ignore_episode_boundaries,
}
return config
class SequentialMemory(Memory):
def __init__(self, limit, **kwargs):
super(SequentialMemory, self).__init__(**kwargs)
self.limit = limit
# Do not use deque to implement the memory. This data structure may seem convenient but
# it is way too slow on random access. Instead, we use our own ring buffer implementation.
self.actions = RingBuffer(limit)
self.rewards = RingBuffer(limit)
self.terminals = RingBuffer(limit)
self.observations = RingBuffer(limit)
def sample(self, batch_size, batch_idxs=None):
"""Return a randomized batch of experiences
# Argument
            batch_size (int): Size of the batch
batch_idxs (int): Indexes to extract
# Returns
A list of experiences randomly selected
"""
# It is not possible to tell whether the first state in the memory is terminal, because it
# would require access to the "terminal" flag associated to the previous state. As a result
# we will never return this first state (only using `self.terminals[0]` to know whether the
# second state is terminal).
# In addition we need enough entries to fill the desired window length.
assert self.nb_entries >= self.window_length + 2, 'not enough entries in the memory'
if batch_idxs is None:
# Draw random indexes such that we have enough entries before each index to fill the
# desired window length.
batch_idxs = sample_batch_indexes(
self.window_length, self.nb_entries - 1, size=batch_size)
batch_idxs = np.array(batch_idxs) + 1
assert np.min(batch_idxs) >= self.window_length + 1
assert np.max(batch_idxs) < self.nb_entries
assert len(batch_idxs) == batch_size
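        # Worked example (for intuition): with window_length=4 and a sampled idx=10,
        # state0 is built from observations[6..9], the transition uses actions[9],
        # rewards[9] and terminals[9], and state1 is built from observations[7..10].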
# Create experiences
experiences = []
for idx in batch_idxs:
terminal0 = self.terminals[idx - 2]
while terminal0:
# Skip this transition because the environment was reset here. Select a new, random
# transition and use this instead. This may cause the batch to contain the same
# transition twice.
idx = sample_batch_indexes(self.window_length + 1, self.nb_entries, size=1)[0]
terminal0 = self.terminals[idx - 2]
assert self.window_length + 1 <= idx < self.nb_entries
# This code is slightly complicated by the fact that subsequent observations might be
# from different episodes. We ensure that an experience never spans multiple episodes.
# This is probably not that important in practice but it seems cleaner.
state0 = [self.observations[idx - 1]]
for offset in range(0, self.window_length - 1):
current_idx = idx - 2 - offset
assert current_idx >= 1
current_terminal = self.terminals[current_idx - 1]
if current_terminal and not self.ignore_episode_boundaries:
# The previously handled observation was terminal, don't add the current one.
# Otherwise we would leak into a different episode.
break
state0.insert(0, self.observations[current_idx])
while len(state0) < self.window_length:
state0.insert(0, zeroed_observation(state0[0]))
action = self.actions[idx - 1]
reward = self.rewards[idx - 1]
terminal1 = self.terminals[idx - 1]
            # Okay, now we need to create the follow-up state. This is state0 shifted by one timestep
# to the right. Again, we need to be careful to not include an observation from the next
# episode if the last state is terminal.
state1 = [np.copy(x) for x in state0[1:]]
state1.append(self.observations[idx])
assert len(state0) == self.window_length
assert len(state1) == len(state0)
experiences.append(Experience(state0=state0, action=action, reward=reward,
state1=state1, terminal1=terminal1))
assert len(experiences) == batch_size
return experiences
def append(self, observation, action, reward, terminal, training=True):
"""Append an observation to the memory
# Argument
observation (dict): Observation returned by environment
action (int): Action taken to obtain this observation
reward (float): Reward obtained by taking this action
terminal (boolean): Is the state terminal
"""
super(SequentialMemory, self).append(observation, action, reward, terminal, training=training)
# This needs to be understood as follows: in `observation`, take `action`, obtain `reward`
        # and whether the next state is `terminal` or not.
if training:
self.observations.append(observation)
self.actions.append(action)
self.rewards.append(reward)
self.terminals.append(terminal)
@property
def nb_entries(self):
"""Return number of observations
# Returns
Number of observations
"""
return len(self.observations)
def get_config(self):
"""Return configurations of SequentialMemory
# Returns
Dict of config
"""
config = super(SequentialMemory, self).get_config()
config['limit'] = self.limit
return config
class EpisodeParameterMemory(Memory):
def __init__(self, limit, **kwargs):
super(EpisodeParameterMemory, self).__init__(**kwargs)
self.limit = limit
self.params = RingBuffer(limit)
self.intermediate_rewards = []
self.total_rewards = RingBuffer(limit)
def sample(self, batch_size, batch_idxs=None):
"""Return a randomized batch of params and rewards
# Argument
            batch_size (int): Size of the batch
batch_idxs (int): Indexes to extract
# Returns
A list of params randomly selected and a list of associated rewards
"""
if batch_idxs is None:
batch_idxs = sample_batch_indexes(0, self.nb_entries, size=batch_size)
assert len(batch_idxs) == batch_size
batch_params = []
batch_total_rewards = []
for idx in batch_idxs:
batch_params.append(self.params[idx])
batch_total_rewards.append(self.total_rewards[idx])
return batch_params, batch_total_rewards
def append(self, observation, action, reward, terminal, training=True):
"""Append a reward to the memory
# Argument
observation (dict): Observation returned by environment
action (int): Action taken to obtain this observation
reward (float): Reward obtained by taking this action
terminal (boolean): Is the state terminal
"""
super(EpisodeParameterMemory, self).append(observation, action, reward, terminal, training=training)
if training:
self.intermediate_rewards.append(reward)
def finalize_episode(self, params):
"""Append an observation to the memory
# Argument
observation (dict): Observation returned by environment
action (int): Action taken to obtain this observation
reward (float): Reward obtained by taking this action
terminal (boolean): Is the state terminal
"""
total_reward = sum(self.intermediate_rewards)
self.total_rewards.append(total_reward)
self.params.append(params)
self.intermediate_rewards = []
@property
def nb_entries(self):
"""Return number of episode rewards
# Returns
Number of episode rewards
"""
return len(self.total_rewards)
def get_config(self):
"""Return configurations of SequentialMemory
# Returns
Dict of config
"""
        config = super(EpisodeParameterMemory, self).get_config()
config['limit'] = self.limit
return config
| 14,004 | 38.450704 | 136 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-control/rl/policy.py
|
from __future__ import division
import numpy as np
from rl.util import *
class Policy(object):
"""Abstract base class for all implemented policies.
Each policy helps with selection of action to take on an environment.
Do not use this abstract base class directly but instead use one of the concrete policies implemented.
To implement your own policy, you have to implement the following methods:
- `select_action`
# Arguments
agent (rl.core.Agent): Agent used
"""
def _set_agent(self, agent):
self.agent = agent
@property
def metrics_names(self):
return []
@property
def metrics(self):
return []
def select_action(self, **kwargs):
raise NotImplementedError()
def get_config(self):
"""Return configuration of the policy
# Returns
Configuration as dict
"""
return {}
class LinearAnnealedPolicy(Policy):
"""Implement the linear annealing policy
Linear Annealing Policy computes a current threshold value and
transfers it to an inner policy which chooses the action. The threshold
    value follows a linear function that decreases over time."""
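    # Typical usage (illustrative): anneal the `eps` attribute of an EpsGreedyQPolicy
    # from 1.0 down to 0.1 over the first 10000 training steps, and use eps=0.05 at test time:
    #
    #     policy = LinearAnnealedPolicy(EpsGreedyQPolicy(), attr='eps',
    #                                   value_max=1., value_min=.1,
    #                                   value_test=.05, nb_steps=10000)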
def __init__(self, inner_policy, attr, value_max, value_min, value_test, nb_steps):
if not hasattr(inner_policy, attr):
raise ValueError('Policy does not have attribute "{}".'.format(attr))
super(LinearAnnealedPolicy, self).__init__()
self.inner_policy = inner_policy
self.attr = attr
self.value_max = value_max
self.value_min = value_min
self.value_test = value_test
self.nb_steps = nb_steps
def get_current_value(self):
"""Return current annealing value
# Returns
Value to use in annealing
"""
if self.agent.training:
# Linear annealed: f(x) = ax + b.
a = -float(self.value_max - self.value_min) / float(self.nb_steps)
b = float(self.value_max)
value = max(self.value_min, a * float(self.agent.step) + b)
else:
value = self.value_test
return value
def select_action(self, **kwargs):
"""Choose an action to perform
# Returns
Action to take (int)
"""
setattr(self.inner_policy, self.attr, self.get_current_value())
return self.inner_policy.select_action(**kwargs)
@property
def metrics_names(self):
"""Return names of metrics
# Returns
List of metric names
"""
return ['mean_{}'.format(self.attr)]
@property
def metrics(self):
"""Return metrics values
# Returns
List of metric values
"""
return [getattr(self.inner_policy, self.attr)]
def get_config(self):
"""Return configurations of LinearAnnealedPolicy
# Returns
Dict of config
"""
config = super(LinearAnnealedPolicy, self).get_config()
config['attr'] = self.attr
config['value_max'] = self.value_max
config['value_min'] = self.value_min
config['value_test'] = self.value_test
config['nb_steps'] = self.nb_steps
config['inner_policy'] = get_object_config(self.inner_policy)
return config
class EpsGreedyQPolicy(Policy):
"""Implement the epsilon greedy policy
Eps Greedy policy either:
- takes a random action with probability epsilon
- takes current best action with prob (1 - epsilon)
"""
def __init__(self, eps=.1):
super(EpsGreedyQPolicy, self).__init__()
self.eps = eps
def select_action(self, q_values):
"""Return the selected action
# Arguments
q_values (np.ndarray): List of the estimations of Q for each action
# Returns
            Selected action
"""
assert q_values.ndim == 1
nb_actions = q_values.shape[0]
if np.random.uniform() < self.eps:
            action = np.random.randint(0, nb_actions)
else:
action = np.argmax(q_values)
return action
def get_config(self):
"""Return configurations of EpsGreedyPolicy
# Returns
Dict of config
"""
config = super(EpsGreedyQPolicy, self).get_config()
config['eps'] = self.eps
return config
class GreedyQPolicy(Policy):
"""Implement the greedy policy
Greedy policy returns the current best action according to q_values
"""
def select_action(self, q_values):
"""Return the selected action
# Arguments
q_values (np.ndarray): List of the estimations of Q for each action
# Returns
            Selected action
"""
assert q_values.ndim == 1
action = np.argmax(q_values)
return action
class BoltzmannQPolicy(Policy):
"""Implement the Boltzmann Q Policy
Boltzmann Q Policy builds a probability law on q values and returns
an action selected randomly according to this law.
"""
def __init__(self, tau=1., clip=(-500., 500.)):
super(BoltzmannQPolicy, self).__init__()
self.tau = tau
self.clip = clip
def select_action(self, q_values):
"""Return the selected action
# Arguments
q_values (np.ndarray): List of the estimations of Q for each action
# Returns
            Selected action
"""
assert q_values.ndim == 1
q_values = q_values.astype('float64')
nb_actions = q_values.shape[0]
exp_values = np.exp(np.clip(q_values / self.tau, self.clip[0], self.clip[1]))
probs = exp_values / np.sum(exp_values)
action = np.random.choice(range(nb_actions), p=probs)
return action
def get_config(self):
"""Return configurations of EpsGreedyPolicy
# Returns
Dict of config
"""
config = super(BoltzmannQPolicy, self).get_config()
config['tau'] = self.tau
config['clip'] = self.clip
return config
class MaxBoltzmannQPolicy(Policy):
"""
    A combination of the eps-greedy and Boltzmann q-policy.
Wiering, M.: Explorations in Efficient Reinforcement Learning.
PhD thesis, University of Amsterdam, Amsterdam (1999)
https://pure.uva.nl/ws/files/3153478/8461_UBA003000033.pdf
"""
def __init__(self, eps=.1, tau=1., clip=(-500., 500.)):
super(MaxBoltzmannQPolicy, self).__init__()
self.eps = eps
self.tau = tau
self.clip = clip
def select_action(self, q_values):
"""Return the selected action
The selected action follows the BoltzmannQPolicy with probability epsilon
or return the Greedy Policy with probability (1 - epsilon)
# Arguments
q_values (np.ndarray): List of the estimations of Q for each action
# Returns
            Selected action
"""
assert q_values.ndim == 1
q_values = q_values.astype('float64')
nb_actions = q_values.shape[0]
if np.random.uniform() < self.eps:
exp_values = np.exp(np.clip(q_values / self.tau, self.clip[0], self.clip[1]))
probs = exp_values / np.sum(exp_values)
action = np.random.choice(range(nb_actions), p=probs)
else:
action = np.argmax(q_values)
return action
def get_config(self):
"""Return configurations of EpsGreedyPolicy
# Returns
Dict of config
"""
config = super(MaxBoltzmannQPolicy, self).get_config()
config['eps'] = self.eps
config['tau'] = self.tau
config['clip'] = self.clip
return config
class BoltzmannGumbelQPolicy(Policy):
"""Implements Boltzmann-Gumbel exploration (BGE) adapted for Q learning
based on the paper Boltzmann Exploration Done Right
(https://arxiv.org/pdf/1705.10257.pdf).
BGE is invariant with respect to the mean of the rewards but not their
variance. The parameter C, which defaults to 1, can be used to correct for
this, and should be set to the least upper bound on the standard deviation
of the rewards.
BGE is only available for training, not testing. For testing purposes, you
can achieve approximately the same result as BGE after training for N steps
on K actions with parameter C by using the BoltzmannQPolicy and setting
tau = C/sqrt(N/K)."""
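    # In short: every action a keeps a count N_a (initialised to 1); the agent picks
    # argmax_a ( q_a + (C / sqrt(N_a)) * Z_a ), where Z_a is standard Gumbel noise,
    # so rarely tried actions receive larger exploration bonuses.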
def __init__(self, C=1.0):
assert C > 0, "BoltzmannGumbelQPolicy C parameter must be > 0, not " + repr(C)
super(BoltzmannGumbelQPolicy, self).__init__()
self.C = C
self.action_counts = None
def select_action(self, q_values):
"""Return the selected action
# Arguments
q_values (np.ndarray): List of the estimations of Q for each action
# Returns
            Selected action
"""
# We can't use BGE during testing, since we don't have access to the
# action_counts at the end of training.
assert self.agent.training, "BoltzmannGumbelQPolicy should only be used for training, not testing"
assert q_values.ndim == 1, q_values.ndim
q_values = q_values.astype('float64')
# If we are starting training, we should reset the action_counts.
# Otherwise, action_counts should already be initialized, since we
# always do so when we begin training.
if self.agent.step == 0:
self.action_counts = np.ones(q_values.shape)
assert self.action_counts is not None, self.agent.step
assert self.action_counts.shape == q_values.shape, (self.action_counts.shape, q_values.shape)
beta = self.C/np.sqrt(self.action_counts)
Z = np.random.gumbel(size=q_values.shape)
perturbation = beta * Z
perturbed_q_values = q_values + perturbation
action = np.argmax(perturbed_q_values)
self.action_counts[action] += 1
return action
def get_config(self):
"""Return configurations of EpsGreedyPolicy
# Returns
Dict of config
"""
config = super(BoltzmannGumbelQPolicy, self).get_config()
config['C'] = self.C
return config
| 10,299 | 29.563798 | 106 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-control/rl/random.py
|
from __future__ import division
import numpy as np
class RandomProcess(object):
def reset_states(self):
pass
class AnnealedGaussianProcess(RandomProcess):
def __init__(self, mu, sigma, sigma_min, n_steps_annealing):
self.mu = mu
self.sigma = sigma
self.n_steps = 0
if sigma_min is not None:
self.m = -float(sigma - sigma_min) / float(n_steps_annealing)
self.c = sigma
self.sigma_min = sigma_min
else:
self.m = 0.
self.c = sigma
self.sigma_min = sigma
@property
def current_sigma(self):
sigma = max(self.sigma_min, self.m * float(self.n_steps) + self.c)
return sigma
class GaussianWhiteNoiseProcess(AnnealedGaussianProcess):
def __init__(self, mu=0., sigma=1., sigma_min=None, n_steps_annealing=1000, size=1):
super(GaussianWhiteNoiseProcess, self).__init__(mu=mu, sigma=sigma, sigma_min=sigma_min, n_steps_annealing=n_steps_annealing)
self.size = size
def sample(self):
sample = np.random.normal(self.mu, self.current_sigma, self.size)
self.n_steps += 1
return sample
# Based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab
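# For intuition, `sample` below implements the Euler-Maruyama discretisation
#   x_{t+dt} = x_t + theta * (mu - x_t) * dt + sigma * sqrt(dt) * N(0, 1)
# where theta controls how strongly the noise is pulled back towards mu and
# sigma (possibly annealed via AnnealedGaussianProcess) sets its scale.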
class OrnsteinUhlenbeckProcess(AnnealedGaussianProcess):
def __init__(self, theta, mu=0., sigma=1., dt=1e-2, size=1, sigma_min=None, n_steps_annealing=1000):
super(OrnsteinUhlenbeckProcess, self).__init__(mu=mu, sigma=sigma, sigma_min=sigma_min, n_steps_annealing=n_steps_annealing)
self.theta = theta
self.mu = mu
self.dt = dt
self.size = size
self.reset_states()
def sample(self):
x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + self.current_sigma * np.sqrt(self.dt) * np.random.normal(size=self.size)
self.x_prev = x
self.n_steps += 1
return x
def reset_states(self):
self.x_prev = np.random.normal(self.mu,self.current_sigma,self.size)
| 2,040 | 33.59322 | 147 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-control/rl/processors.py
|
import random
import numpy as np
from rl.core import Processor
from rl.util import WhiteningNormalizer
class MultiInputProcessor(Processor):
"""Converts observations from an environment with multiple observations for use in a neural network
policy.
In some cases, you have environments that return multiple different observations per timestep
(in a robotics context, for example, a camera may be used to view the scene and a joint encoder may
be used to report the angles for each joint). Usually, this can be handled by a policy that has
multiple inputs, one for each modality. However, observations are returned by the environment
in the form of a tuple `[(modality1_t, modality2_t, ..., modalityn_t) for t in T]` but the neural network
    expects them in per-modality batches like so: `[[modality1_1, ..., modality1_T], ..., [modalityn_1, ..., modalityn_T]]`.
This processor converts observations appropriate for this use case.
# Arguments
nb_inputs (integer): The number of inputs, that is different modalities, to be used.
Your neural network that you use for the policy must have a corresponding number of
inputs.
"""
def __init__(self, nb_inputs):
self.nb_inputs = nb_inputs
def process_state_batch(self, state_batch):
input_batches = [[] for x in range(self.nb_inputs)]
for state in state_batch:
processed_state = [[] for x in range(self.nb_inputs)]
for observation in state:
assert len(observation) == self.nb_inputs
for o, s in zip(observation, processed_state):
s.append(o)
for idx, s in enumerate(processed_state):
input_batches[idx].append(s)
return [np.array(x) for x in input_batches]
class WhiteningNormalizerProcessor(Processor):
"""Normalizes the observations to have zero mean and standard deviation of one,
i.e. it applies whitening to the inputs.
This typically helps significantly with learning, especially if different dimensions are
on different scales. However, it complicates training in the sense that you will have to store
these weights alongside the policy if you intend to load it later. It is the responsibility of
the user to do so.
"""
def __init__(self):
self.normalizer = None
def process_state_batch(self, batch):
if self.normalizer is None:
self.normalizer = WhiteningNormalizer(shape=batch.shape[1:], dtype=batch.dtype)
self.normalizer.update(batch)
return self.normalizer.normalize(batch)
| 2,639 | 43.745763 | 125 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-control/rl/util.py
|
import numpy as np
from keras.models import model_from_config, Sequential, Model
import keras.optimizers as optimizers
import keras.backend as K
def clone_model(model, custom_objects={}):
# Requires Keras 1.0.7 since get_config has breaking changes.
config = {
'class_name': model.__class__.__name__,
'config': model.get_config(),
}
clone = model_from_config(config, custom_objects=custom_objects)
clone.set_weights(model.get_weights())
return clone
def clone_optimizer(optimizer):
if type(optimizer) is str:
return optimizers.get(optimizer)
# Requires Keras 1.0.7 since get_config has breaking changes.
params = dict([(k, v) for k, v in optimizer.get_config().items()])
config = {
'class_name': optimizer.__class__.__name__,
'config': params,
}
if hasattr(optimizers, 'optimizer_from_config'):
# COMPATIBILITY: Keras < 2.0
clone = optimizers.optimizer_from_config(config)
else:
clone = optimizers.deserialize(config)
return clone
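# For intuition: the soft ("Polyak") update below moves every target weight a small
# step towards the online weight, target <- tau * source + (1 - tau) * target, so with
# e.g. tau=0.001 the target network trails the online network slowly and smoothly.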
def get_soft_target_model_updates(target, source, tau):
target_weights = target.trainable_weights + sum([l.non_trainable_weights for l in target.layers], [])
source_weights = source.trainable_weights + sum([l.non_trainable_weights for l in source.layers], [])
assert len(target_weights) == len(source_weights)
# Create updates.
updates = []
for tw, sw in zip(target_weights, source_weights):
updates.append((tw, tau * sw + (1. - tau) * tw))
return updates
def get_object_config(o):
if o is None:
return None
config = {
'class_name': o.__class__.__name__,
'config': o.get_config()
}
return config
def huber_loss(y_true, y_pred, clip_value):
# Huber loss, see https://en.wikipedia.org/wiki/Huber_loss and
# https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
# for details.
assert clip_value > 0.
x = y_true - y_pred
if np.isinf(clip_value):
        # Special case for infinity since TensorFlow has problems
# if we compare `K.abs(x) < np.inf`.
return .5 * K.square(x)
condition = K.abs(x) < clip_value
squared_loss = .5 * K.square(x)
linear_loss = clip_value * (K.abs(x) - .5 * clip_value)
if K.backend() == 'tensorflow':
import tensorflow as tf
if hasattr(tf, 'select'):
return tf.select(condition, squared_loss, linear_loss) # condition, true, false
else:
return tf.where(condition, squared_loss, linear_loss) # condition, true, false
elif K.backend() == 'theano':
from theano import tensor as T
return T.switch(condition, squared_loss, linear_loss)
else:
raise RuntimeError('Unknown backend "{}".'.format(K.backend()))
class AdditionalUpdatesOptimizer(optimizers.Optimizer):
def __init__(self, optimizer, additional_updates):
super(AdditionalUpdatesOptimizer, self).__init__()
self.optimizer = optimizer
self.additional_updates = additional_updates
def get_updates(self, params, loss):
updates = self.optimizer.get_updates(params=params, loss=loss)
updates += self.additional_updates
self.updates = updates
return self.updates
def get_config(self):
return self.optimizer.get_config()
# Based on https://github.com/openai/baselines/blob/master/baselines/common/mpi_running_mean_std.py
class WhiteningNormalizer(object):
def __init__(self, shape, eps=1e-2, dtype=np.float64):
self.eps = eps
self.shape = shape
self.dtype = dtype
self._sum = np.zeros(shape, dtype=dtype)
self._sumsq = np.zeros(shape, dtype=dtype)
self._count = 0
self.mean = np.zeros(shape, dtype=dtype)
self.std = np.ones(shape, dtype=dtype)
def normalize(self, x):
return (x - self.mean) / self.std
def denormalize(self, x):
return self.std * x + self.mean
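    # `update` below maintains running sums so that
    #   mean = sum(x) / count
    #   std  = sqrt(max(eps^2, sum(x^2)/count - mean^2))
    # i.e. the usual E[x^2] - E[x]^2 estimate with a small floor to avoid division
    # by (near) zero in `normalize`.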
def update(self, x):
if x.ndim == len(self.shape):
x = x.reshape(-1, *self.shape)
assert x.shape[1:] == self.shape
self._count += x.shape[0]
self._sum += np.sum(x, axis=0)
self._sumsq += np.sum(np.square(x), axis=0)
self.mean = self._sum / float(self._count)
self.std = np.sqrt(np.maximum(np.square(self.eps), self._sumsq / float(self._count) - np.square(self.mean)))
| 4,476 | 32.410448 | 116 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-control/rl/__init__.py
| 0 | 0 | 0 |
py
|
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-control/rl/agents/ddpg.py
|
from __future__ import division
from collections import deque
import os
import warnings
import numpy as np
import keras.backend as K
import keras.optimizers as optimizers
from rl.core import Agent
from rl.random import OrnsteinUhlenbeckProcess
from rl.util import *
def mean_q(y_true, y_pred):
return K.mean(K.max(y_pred, axis=-1))
# Deep DPG as described by Lillicrap et al. (2015)
# http://arxiv.org/pdf/1509.02971v2.pdf
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.646.4324&rep=rep1&type=pdf
class DDPGAgent(Agent):
"""Write me
"""
def __init__(self, nb_actions, actor, critic, critic_action_input, memory,
gamma=.99, batch_size=32, nb_steps_warmup_critic=1000, nb_steps_warmup_actor=1000,
train_interval=1, memory_interval=1, delta_range=None, delta_clip=np.inf,
random_process=None, custom_model_objects={}, target_model_update=.001, **kwargs):
if hasattr(actor.output, '__len__') and len(actor.output) > 1:
raise ValueError('Actor "{}" has more than one output. DDPG expects an actor that has a single output.'.format(actor))
if hasattr(critic.output, '__len__') and len(critic.output) > 1:
raise ValueError('Critic "{}" has more than one output. DDPG expects a critic that has a single output.'.format(critic))
if critic_action_input not in critic.input:
raise ValueError('Critic "{}" does not have designated action input "{}".'.format(critic, critic_action_input))
if not hasattr(critic.input, '__len__') or len(critic.input) < 2:
raise ValueError('Critic "{}" does not have enough inputs. The critic must have at exactly two inputs, one for the action and one for the observation.'.format(critic))
super(DDPGAgent, self).__init__(**kwargs)
# Soft vs hard target model updates.
if target_model_update < 0:
raise ValueError('`target_model_update` must be >= 0.')
elif target_model_update >= 1:
# Hard update every `target_model_update` steps.
target_model_update = int(target_model_update)
else:
# Soft update with `(1 - target_model_update) * old + target_model_update * new`.
target_model_update = float(target_model_update)
if delta_range is not None:
warnings.warn('`delta_range` is deprecated. Please use `delta_clip` instead, which takes a single scalar. For now we\'re falling back to `delta_range[1] = {}`'.format(delta_range[1]))
delta_clip = delta_range[1]
# Parameters.
self.nb_actions = nb_actions
self.nb_steps_warmup_actor = nb_steps_warmup_actor
self.nb_steps_warmup_critic = nb_steps_warmup_critic
self.random_process = random_process
self.delta_clip = delta_clip
self.gamma = gamma
self.target_model_update = target_model_update
self.batch_size = batch_size
self.train_interval = train_interval
self.memory_interval = memory_interval
self.custom_model_objects = custom_model_objects
# Related objects.
self.actor = actor
self.critic = critic
self.critic_action_input = critic_action_input
self.critic_action_input_idx = self.critic.input.index(critic_action_input)
self.memory = memory
# State.
self.compiled = False
self.reset_states()
@property
def uses_learning_phase(self):
return self.actor.uses_learning_phase or self.critic.uses_learning_phase
def compile(self, optimizer, metrics=[]):
metrics += [mean_q]
if type(optimizer) in (list, tuple):
if len(optimizer) != 2:
                raise ValueError('Please provide exactly two optimizers, the first one for the actor and the second one for the critic.')
actor_optimizer, critic_optimizer = optimizer
else:
actor_optimizer = optimizer
critic_optimizer = clone_optimizer(optimizer)
if type(actor_optimizer) is str:
actor_optimizer = optimizers.get(actor_optimizer)
if type(critic_optimizer) is str:
critic_optimizer = optimizers.get(critic_optimizer)
assert actor_optimizer != critic_optimizer
if len(metrics) == 2 and hasattr(metrics[0], '__len__') and hasattr(metrics[1], '__len__'):
actor_metrics, critic_metrics = metrics
else:
actor_metrics = critic_metrics = metrics
def clipped_error(y_true, y_pred):
return K.mean(huber_loss(y_true, y_pred, self.delta_clip), axis=-1)
# Compile target networks. We only use them in feed-forward mode, hence we can pass any
# optimizer and loss since we never use it anyway.
self.target_actor = clone_model(self.actor, self.custom_model_objects)
self.target_actor.compile(optimizer='sgd', loss='mse')
self.target_critic = clone_model(self.critic, self.custom_model_objects)
self.target_critic.compile(optimizer='sgd', loss='mse')
# We also compile the actor. We never optimize the actor using Keras but instead compute
# the policy gradient ourselves. However, we need the actor in feed-forward mode, hence
        # we also compile it with any optimizer and loss since we never use them anyway.
self.actor.compile(optimizer='sgd', loss='mse')
# Compile the critic.
if self.target_model_update < 1.:
# We use the `AdditionalUpdatesOptimizer` to efficiently soft-update the target model.
critic_updates = get_soft_target_model_updates(self.target_critic, self.critic, self.target_model_update)
critic_optimizer = AdditionalUpdatesOptimizer(critic_optimizer, critic_updates)
self.critic.compile(optimizer=critic_optimizer, loss=clipped_error, metrics=critic_metrics)
# Combine actor and critic so that we can get the policy gradient.
# Assuming critic's state inputs are the same as actor's.
combined_inputs = []
critic_inputs = []
for i in self.critic.input:
if i == self.critic_action_input:
combined_inputs.append([])
else:
combined_inputs.append(i)
critic_inputs.append(i)
combined_inputs[self.critic_action_input_idx] = self.actor(critic_inputs)
combined_output = self.critic(combined_inputs)
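        # The actor is updated with the deterministic policy gradient: we maximise the
        # critic's value of the actor's own action, i.e. minimise -mean(Q(s, pi(s))),
        # which is exactly the loss passed to the optimizer below.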
updates = actor_optimizer.get_updates(
params=self.actor.trainable_weights, loss=-K.mean(combined_output))
if self.target_model_update < 1.:
# Include soft target model updates.
updates += get_soft_target_model_updates(self.target_actor, self.actor, self.target_model_update)
updates += self.actor.updates # include other updates of the actor, e.g. for BN
# Finally, combine it all into a callable function.
if K.backend() == 'tensorflow':
self.actor_train_fn = K.function(critic_inputs + [K.learning_phase()],
[self.actor(critic_inputs)], updates=updates)
else:
if self.uses_learning_phase:
critic_inputs += [K.learning_phase()]
self.actor_train_fn = K.function(critic_inputs, [self.actor(critic_inputs)], updates=updates)
self.actor_optimizer = actor_optimizer
self.compiled = True
def load_weights(self, filepath):
filename, extension = os.path.splitext(filepath)
actor_filepath = filename + '_actor' + extension
critic_filepath = filename + '_critic' + extension
self.actor.load_weights(actor_filepath)
self.critic.load_weights(critic_filepath)
self.update_target_models_hard()
def save_weights(self, filepath, overwrite=False):
filename, extension = os.path.splitext(filepath)
actor_filepath = filename + '_actor' + extension
critic_filepath = filename + '_critic' + extension
self.actor.save_weights(actor_filepath, overwrite=overwrite)
self.critic.save_weights(critic_filepath, overwrite=overwrite)
def update_target_models_hard(self):
self.target_critic.set_weights(self.critic.get_weights())
self.target_actor.set_weights(self.actor.get_weights())
# TODO: implement pickle
def reset_states(self):
if self.random_process is not None:
self.random_process.reset_states()
self.recent_action = None
self.recent_observation = None
if self.compiled:
self.actor.reset_states()
self.critic.reset_states()
self.target_actor.reset_states()
self.target_critic.reset_states()
def process_state_batch(self, batch):
batch = np.array(batch)
if self.processor is None:
return batch
return self.processor.process_state_batch(batch)
def select_action(self, state):
batch = self.process_state_batch([state])
action = self.actor.predict_on_batch(batch).flatten()
assert action.shape == (self.nb_actions,)
# Apply noise, if a random process is set.
if self.training and self.random_process is not None:
noise = self.random_process.sample()
assert noise.shape == action.shape
action += noise
return action
def forward(self, observation):
# Select an action.
state = self.memory.get_recent_state(observation)
action = self.select_action(state) # TODO: move this into policy
# Book-keeping.
self.recent_observation = observation
self.recent_action = action
return action
@property
def layers(self):
return self.actor.layers[:] + self.critic.layers[:]
@property
def metrics_names(self):
names = self.critic.metrics_names[:]
if self.processor is not None:
names += self.processor.metrics_names[:]
return names
def backward(self, reward, terminal=False):
# Store most recent experience in memory.
if self.step % self.memory_interval == 0:
self.memory.append(self.recent_observation, self.recent_action, reward, terminal,
training=self.training)
metrics = [np.nan for _ in self.metrics_names]
if not self.training:
# We're done here. No need to update the experience memory since we only use the working
# memory to obtain the state over the most recent observations.
return metrics
# Train the network on a single stochastic batch.
can_train_either = self.step > self.nb_steps_warmup_critic or self.step > self.nb_steps_warmup_actor
if can_train_either and self.step % self.train_interval == 0:
experiences = self.memory.sample(self.batch_size)
assert len(experiences) == self.batch_size
# Start by extracting the necessary parameters (we use a vectorized implementation).
state0_batch = []
reward_batch = []
action_batch = []
terminal1_batch = []
state1_batch = []
for e in experiences:
state0_batch.append(e.state0)
state1_batch.append(e.state1)
reward_batch.append(e.reward)
action_batch.append(e.action)
terminal1_batch.append(0. if e.terminal1 else 1.)
# Prepare and validate parameters.
state0_batch = self.process_state_batch(state0_batch)
state1_batch = self.process_state_batch(state1_batch)
terminal1_batch = np.array(terminal1_batch)
reward_batch = np.array(reward_batch)
action_batch = np.array(action_batch)
assert reward_batch.shape == (self.batch_size,)
assert terminal1_batch.shape == reward_batch.shape
assert action_batch.shape == (self.batch_size, self.nb_actions)
# Update critic, if warm up is over.
if self.step > self.nb_steps_warmup_critic:
target_actions = self.target_actor.predict_on_batch(state1_batch)
assert target_actions.shape == (self.batch_size, self.nb_actions)
if len(self.critic.inputs) >= 3:
state1_batch_with_action = state1_batch[:]
else:
state1_batch_with_action = [state1_batch]
state1_batch_with_action.insert(self.critic_action_input_idx, target_actions)
target_q_values = self.target_critic.predict_on_batch(state1_batch_with_action).flatten()
assert target_q_values.shape == (self.batch_size,)
# Compute r_t + gamma * max_a Q(s_t+1, a) and update the target ys accordingly,
# but only for the affected output units (as given by action_batch).
discounted_reward_batch = self.gamma * target_q_values
discounted_reward_batch *= terminal1_batch
assert discounted_reward_batch.shape == reward_batch.shape
targets = (reward_batch + discounted_reward_batch).reshape(self.batch_size, 1)
# Perform a single batch update on the critic network.
if len(self.critic.inputs) >= 3:
state0_batch_with_action = state0_batch[:]
else:
state0_batch_with_action = [state0_batch]
state0_batch_with_action.insert(self.critic_action_input_idx, action_batch)
metrics = self.critic.train_on_batch(state0_batch_with_action, targets)
if self.processor is not None:
metrics += self.processor.metrics
# Update actor, if warm up is over.
if self.step > self.nb_steps_warmup_actor:
# TODO: implement metrics for actor
if len(self.actor.inputs) >= 2:
inputs = state0_batch[:]
else:
inputs = [state0_batch]
if self.uses_learning_phase:
inputs += [self.training]
action_values = self.actor_train_fn(inputs)[0]
assert action_values.shape == (self.batch_size, self.nb_actions)
if self.target_model_update >= 1 and self.step % self.target_model_update == 0:
self.update_target_models_hard()
return metrics
| 14,524 | 44.820189 | 195 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-control/rl/agents/sarsa.py
|
import collections
import numpy as np
from keras.callbacks import History
from keras.models import Model
from keras.layers import Input, Lambda
import keras.backend as K
from rl.core import Agent
from rl.agents.dqn import mean_q
from rl.util import huber_loss
from rl.policy import EpsGreedyQPolicy, GreedyQPolicy
from rl.util import get_object_config
class SARSAAgent(Agent):
"""Write me
"""
def __init__(self, model, nb_actions, policy=None, test_policy=None, gamma=.99, nb_steps_warmup=10,
train_interval=1, delta_clip=np.inf, *args, **kwargs):
        super(SARSAAgent, self).__init__(*args, **kwargs)
# Do not use defaults in constructor because that would mean that each instance shares the same
# policy.
if policy is None:
policy = EpsGreedyQPolicy()
if test_policy is None:
test_policy = GreedyQPolicy()
self.model = model
self.nb_actions = nb_actions
self.policy = policy
self.test_policy = test_policy
self.gamma = gamma
self.nb_steps_warmup = nb_steps_warmup
self.train_interval = train_interval
self.delta_clip = delta_clip
self.compiled = False
self.actions = None
self.observations = None
self.rewards = None
def compute_batch_q_values(self, state_batch):
batch = self.process_state_batch(state_batch)
q_values = self.model.predict_on_batch(batch)
assert q_values.shape == (len(state_batch), self.nb_actions)
return q_values
def compute_q_values(self, state):
q_values = self.compute_batch_q_values([state]).flatten()
assert q_values.shape == (self.nb_actions,)
return q_values
def process_state_batch(self, batch):
batch = np.array(batch)
if self.processor is None:
return batch
return self.processor.process_state_batch(batch)
def get_config(self):
config = super(SARSAAgent, self).get_config()
config['nb_actions'] = self.nb_actions
config['gamma'] = self.gamma
config['nb_steps_warmup'] = self.nb_steps_warmup
config['train_interval'] = self.train_interval
config['delta_clip'] = self.delta_clip
config['model'] = get_object_config(self.model)
config['policy'] = get_object_config(self.policy)
config['test_policy'] = get_object_config(self.test_policy)
return config
def compile(self, optimizer, metrics=[]):
metrics += [mean_q] # register default metrics
def clipped_masked_error(args):
y_true, y_pred, mask = args
loss = huber_loss(y_true, y_pred, self.delta_clip)
loss *= mask # apply element-wise mask
return K.sum(loss, axis=-1)
# Create trainable model. The problem is that we need to mask the output since we only
# ever want to update the Q values for a certain action. The way we achieve this is by
# using a custom Lambda layer that computes the loss. This gives us the necessary flexibility
# to mask out certain parameters by passing in multiple inputs to the Lambda layer.
y_pred = self.model.output
y_true = Input(name='y_true', shape=(self.nb_actions,))
mask = Input(name='mask', shape=(self.nb_actions,))
loss_out = Lambda(clipped_masked_error, output_shape=(1,), name='loss')([y_pred, y_true, mask])
ins = [self.model.input] if type(self.model.input) is not list else self.model.input
trainable_model = Model(inputs=ins + [y_true, mask], outputs=[loss_out, y_pred])
assert len(trainable_model.output_names) == 2
combined_metrics = {trainable_model.output_names[1]: metrics}
losses = [
lambda y_true, y_pred: y_pred, # loss is computed in Lambda layer
lambda y_true, y_pred: K.zeros_like(y_pred), # we only include this for the metrics
]
trainable_model.compile(optimizer=optimizer, loss=losses, metrics=combined_metrics)
self.trainable_model = trainable_model
self.compiled = True
def load_weights(self, filepath):
self.model.load_weights(filepath)
def save_weights(self, filepath, overwrite=False):
self.model.save_weights(filepath, overwrite=overwrite)
def reset_states(self):
self.actions = collections.deque(maxlen=2)
self.observations = collections.deque(maxlen=2)
self.rewards = collections.deque(maxlen=2)
if self.compiled:
self.model.reset_states()
def forward(self, observation):
# Select an action.
q_values = self.compute_q_values([observation])
if self.training:
action = self.policy.select_action(q_values=q_values)
else:
action = self.test_policy.select_action(q_values=q_values)
# Book-keeping.
self.observations.append(observation)
self.actions.append(action)
return action
def backward(self, reward, terminal):
metrics = [np.nan for _ in self.metrics_names]
if not self.training:
# We're done here. No need to update the experience memory since we only use the working
# memory to obtain the state over the most recent observations.
return metrics
# Train the network on a single stochastic batch.
if self.step > self.nb_steps_warmup and self.step % self.train_interval == 0:
# Start by extracting the necessary parameters (we use a vectorized implementation).
self.rewards.append(reward)
if len(self.observations) < 2:
return metrics # not enough data yet
state0_batch = [self.observations[0]]
reward_batch = [self.rewards[0]]
action_batch = [self.actions[0]]
terminal1_batch = [0.] if terminal else [1.]
state1_batch = [self.observations[1]]
action1_batch = [self.actions[1]]
# Prepare and validate parameters.
state0_batch = self.process_state_batch(state0_batch)
state1_batch = self.process_state_batch(state1_batch)
terminal1_batch = np.array(terminal1_batch)
reward_batch = np.array(reward_batch)
assert reward_batch.shape == (1,)
assert terminal1_batch.shape == reward_batch.shape
assert len(action_batch) == len(reward_batch)
batch = self.process_state_batch(state1_batch)
q_values = self.compute_q_values(batch)
q_values = q_values.reshape((1, self.nb_actions))
q_batch = q_values[0, action1_batch]
assert q_batch.shape == (1,)
targets = np.zeros((1, self.nb_actions))
dummy_targets = np.zeros((1,))
masks = np.zeros((1, self.nb_actions))
# Compute r_t + gamma * Q(s_t+1, a_t+1)
discounted_reward_batch = self.gamma * q_batch
# Set discounted reward to zero for all states that were terminal.
discounted_reward_batch *= terminal1_batch
assert discounted_reward_batch.shape == reward_batch.shape
Rs = reward_batch + discounted_reward_batch
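# For example: r_t = 1.0, gamma = 0.99 and Q(s_t+1, a_t+1) = 5.0 give a SARSA target
# of 1.0 + 0.99 * 5.0 = 5.95 for a non-terminal step. Because a_t+1 is the action the
# behaviour policy actually took (not the greedy one), the update is on-policy.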
for idx, (target, mask, R, action) in enumerate(zip(targets, masks, Rs, action_batch)):
target[action] = R # update action with estimated accumulated reward
dummy_targets[idx] = R
mask[action] = 1. # enable loss for this specific action
targets = np.array(targets).astype('float32')
masks = np.array(masks).astype('float32')
# Finally, perform a single update on the entire batch. We use a dummy target since
# the actual loss is computed in a Lambda layer that needs more complex input. However,
# it is still useful to know the actual target to compute metrics properly.
state0_batch = state0_batch.reshape((1,) + state0_batch.shape)
ins = [state0_batch] if type(self.model.input) is not list else state0_batch
metrics = self.trainable_model.train_on_batch(ins + [targets, masks], [dummy_targets, targets])
metrics = [metric for idx, metric in enumerate(metrics) if idx not in (1, 2)] # throw away individual losses
metrics += self.policy.metrics
if self.processor is not None:
metrics += self.processor.metrics
return metrics
@property
def layers(self):
return self.model.layers[:]
@property
def metrics_names(self):
# Throw away individual losses and replace output name since this is hidden from the user.
assert len(self.trainable_model.output_names) == 2
dummy_output_name = self.trainable_model.output_names[1]
model_metrics = [name for idx, name in enumerate(self.trainable_model.metrics_names) if idx not in (1, 2)]
model_metrics = [name.replace(dummy_output_name + '_', '') for name in model_metrics]
names = model_metrics + self.policy.metrics_names[:]
if self.processor is not None:
names += self.processor.metrics_names[:]
return names
@property
def policy(self):
return self.__policy
@policy.setter
def policy(self, policy):
self.__policy = policy
self.__policy._set_agent(self)
@property
def test_policy(self):
return self.__test_policy
@test_policy.setter
def test_policy(self, policy):
self.__test_policy = policy
self.__test_policy._set_agent(self)
# Aliases
SarsaAgent = SARSAAgent
| 9,668 | 40.320513 | 121 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-control/rl/agents/dqn.py
|
from __future__ import division
import warnings
import keras.backend as K
from keras.models import Model
from keras.layers import Lambda, Input, Layer, Dense
from rl.core import Agent
from rl.policy import EpsGreedyQPolicy, GreedyQPolicy
from rl.util import *
def mean_q(y_true, y_pred):
return K.mean(K.max(y_pred, axis=-1))
class AbstractDQNAgent(Agent):
"""Write me
"""
def __init__(self, nb_actions, memory, gamma=.99, batch_size=32, nb_steps_warmup=1000,
train_interval=1, memory_interval=1, target_model_update=10000,
delta_range=None, delta_clip=np.inf, custom_model_objects={}, **kwargs):
super(AbstractDQNAgent, self).__init__(**kwargs)
# Soft vs hard target model updates.
if target_model_update < 0:
raise ValueError('`target_model_update` must be >= 0.')
elif target_model_update >= 1:
# Hard update every `target_model_update` steps.
target_model_update = int(target_model_update)
else:
# Soft update with `(1 - target_model_update) * old + target_model_update * new`.
target_model_update = float(target_model_update)
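# For example, target_model_update = 1e-3 moves the target weights only 0.1% of the
# way towards the online weights per update (Polyak averaging), whereas an integer
# value such as 10000 copies the weights wholesale every 10000 steps.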
if delta_range is not None:
warnings.warn('`delta_range` is deprecated. Please use `delta_clip` instead, which takes a single scalar. For now we\'re falling back to `delta_range[1] = {}`'.format(delta_range[1]))
delta_clip = delta_range[1]
# Parameters.
self.nb_actions = nb_actions
self.gamma = gamma
self.batch_size = batch_size
self.nb_steps_warmup = nb_steps_warmup
self.train_interval = train_interval
self.memory_interval = memory_interval
self.target_model_update = target_model_update
self.delta_clip = delta_clip
self.custom_model_objects = custom_model_objects
# Related objects.
self.memory = memory
# State.
self.compiled = False
def process_state_batch(self, batch):
batch = np.array(batch)
if self.processor is None:
return batch
return self.processor.process_state_batch(batch)
def compute_batch_q_values(self, state_batch):
batch = self.process_state_batch(state_batch)
q_values = self.model.predict_on_batch(batch)
assert q_values.shape == (len(state_batch), self.nb_actions)
return q_values
def compute_q_values(self, state):
q_values = self.compute_batch_q_values([state]).flatten()
assert q_values.shape == (self.nb_actions,)
return q_values
def get_config(self):
return {
'nb_actions': self.nb_actions,
'gamma': self.gamma,
'batch_size': self.batch_size,
'nb_steps_warmup': self.nb_steps_warmup,
'train_interval': self.train_interval,
'memory_interval': self.memory_interval,
'target_model_update': self.target_model_update,
'delta_clip': self.delta_clip,
'memory': get_object_config(self.memory),
}
# An implementation of the DQN agent as described in Mnih (2013) and Mnih (2015).
# http://arxiv.org/pdf/1312.5602.pdf
# http://arxiv.org/abs/1509.06461
class DQNAgent(AbstractDQNAgent):
"""
# Arguments
model__: A Keras model.
policy__: A Keras-rl policy that is defined in [policy](https://github.com/keras-rl/keras-rl/blob/master/rl/policy.py).
test_policy__: A Keras-rl policy.
enable_double_dqn__: A boolean which enables Double DQN as proposed by van Hasselt et al. to reduce overestimation of Q values.
enable_dueling_network__: A boolean which enables the dueling architecture proposed by Wang et al.
dueling_type__: If `enable_dueling_network` is set to `True`, a type of dueling architecture must be chosen which calculates Q(s,a) from V(s) and A(s,a) differently. Note that `avg` is recommended in the [paper](https://arxiv.org/abs/1511.06581).
`avg`: Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-Avg_a(A(s,a;theta)))
`max`: Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-max_a(A(s,a;theta)))
`naive`: Q(s,a;theta) = V(s;theta) + A(s,a;theta)
"""
def __init__(self, model, policy=None, test_policy=None, enable_double_dqn=True, enable_dueling_network=False,
dueling_type='avg', *args, **kwargs):
super(DQNAgent, self).__init__(*args, **kwargs)
# Validate (important) input.
if hasattr(model.output, '__len__') and len(model.output) > 1:
raise ValueError('Model "{}" has more than one output. DQN expects a model that has a single output.'.format(model))
if model.output._keras_shape != (None, self.nb_actions):
raise ValueError('Model output "{}" has invalid shape. DQN expects a model that has one dimension for each action, in this case {}.'.format(model.output, self.nb_actions))
# Parameters.
self.enable_double_dqn = enable_double_dqn
self.enable_dueling_network = enable_dueling_network
self.dueling_type = dueling_type
if self.enable_dueling_network:
# get the second last layer of the model, abandon the last layer
layer = model.layers[-2]
nb_action = model.output._keras_shape[-1]
# layer y has a shape (nb_action+1,)
# y[:,0] represents V(s;theta)
# y[:,1:] represents A(s,a;theta)
y = Dense(nb_action + 1, activation='linear')(layer.output)
# calculate Q(s,a;theta)
# dueling_type == 'avg'
# Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-Avg_a(A(s,a;theta)))
# dueling_type == 'max'
# Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-max_a(A(s,a;theta)))
# dueling_type == 'naive'
# Q(s,a;theta) = V(s;theta) + A(s,a;theta)
if self.dueling_type == 'avg':
outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.mean(a[:, 1:], keepdims=True), output_shape=(nb_action,))(y)
elif self.dueling_type == 'max':
outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.max(a[:, 1:], keepdims=True), output_shape=(nb_action,))(y)
elif self.dueling_type == 'naive':
outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:], output_shape=(nb_action,))(y)
else:
assert False, "dueling_type must be one of {'avg','max','naive'}"
model = Model(inputs=model.input, outputs=outputlayer)
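# Rough numeric sketch of the 'avg' aggregation for a single sample: if the extra
# Dense layer outputs V(s) = 2.0 and A(s, .) = [1.0, 0.0, -1.0] (mean 0.0), the
# resulting Q values are [3.0, 2.0, 1.0]. Subtracting the mean (or max) of the
# advantages keeps V and A identifiable, which the naive sum does not.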
# Related objects.
self.model = model
if policy is None:
policy = EpsGreedyQPolicy()
if test_policy is None:
test_policy = GreedyQPolicy()
self.policy = policy
self.test_policy = test_policy
# State.
self.reset_states()
def get_config(self):
config = super(DQNAgent, self).get_config()
config['enable_double_dqn'] = self.enable_double_dqn
config['dueling_type'] = self.dueling_type
config['enable_dueling_network'] = self.enable_dueling_network
config['model'] = get_object_config(self.model)
config['policy'] = get_object_config(self.policy)
config['test_policy'] = get_object_config(self.test_policy)
if self.compiled:
config['target_model'] = get_object_config(self.target_model)
return config
def compile(self, optimizer, metrics=[]):
metrics += [mean_q] # register default metrics
# We never train the target model, hence we can set the optimizer and loss arbitrarily.
self.target_model = clone_model(self.model, self.custom_model_objects)
self.target_model.compile(optimizer='sgd', loss='mse')
self.model.compile(optimizer='sgd', loss='mse')
# Compile model.
if self.target_model_update < 1.:
# We use the `AdditionalUpdatesOptimizer` to efficiently soft-update the target model.
updates = get_soft_target_model_updates(self.target_model, self.model, self.target_model_update)
optimizer = AdditionalUpdatesOptimizer(optimizer, updates)
def clipped_masked_error(args):
y_true, y_pred, mask = args
loss = huber_loss(y_true, y_pred, self.delta_clip)
loss *= mask # apply element-wise mask
return K.sum(loss, axis=-1)
# Create trainable model. The problem is that we need to mask the output since we only
# ever want to update the Q values for a certain action. The way we achieve this is by
# using a custom Lambda layer that computes the loss. This gives us the necessary flexibility
# to mask out certain parameters by passing in multiple inputs to the Lambda layer.
y_pred = self.model.output
y_true = Input(name='y_true', shape=(self.nb_actions,))
mask = Input(name='mask', shape=(self.nb_actions,))
loss_out = Lambda(clipped_masked_error, output_shape=(1,), name='loss')([y_true, y_pred, mask])
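# For example, with nb_actions = 4 and action 2 taken in a transition, the mask fed
# in at training time is [0, 0, 1, 0], so only the Huber loss on that action's Q
# value contributes to the summed loss.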
ins = [self.model.input] if type(self.model.input) is not list else self.model.input
trainable_model = Model(inputs=ins + [y_true, mask], outputs=[loss_out, y_pred])
assert len(trainable_model.output_names) == 2
combined_metrics = {trainable_model.output_names[1]: metrics}
losses = [
lambda y_true, y_pred: y_pred, # loss is computed in Lambda layer
lambda y_true, y_pred: K.zeros_like(y_pred), # we only include this for the metrics
]
trainable_model.compile(optimizer=optimizer, loss=losses, metrics=combined_metrics)
self.trainable_model = trainable_model
self.compiled = True
def load_weights(self, filepath):
self.model.load_weights(filepath)
self.update_target_model_hard()
def save_weights(self, filepath, overwrite=False):
self.model.save_weights(filepath, overwrite=overwrite)
def reset_states(self):
self.recent_action = None
self.recent_observation = None
if self.compiled:
self.model.reset_states()
self.target_model.reset_states()
def update_target_model_hard(self):
self.target_model.set_weights(self.model.get_weights())
def forward(self, observation):
# Select an action.
state = self.memory.get_recent_state(observation)
q_values = self.compute_q_values(state)
if self.training:
action = self.policy.select_action(q_values=q_values)
else:
action = self.test_policy.select_action(q_values=q_values)
# Book-keeping.
self.recent_observation = observation
self.recent_action = action
return action
def backward(self, reward, terminal):
# Store most recent experience in memory.
if self.step % self.memory_interval == 0:
self.memory.append(self.recent_observation, self.recent_action, reward, terminal,
training=self.training)
metrics = [np.nan for _ in self.metrics_names]
if not self.training:
# We're done here. No need to update the experience memory since we only use the working
# memory to obtain the state over the most recent observations.
return metrics
# Train the network on a single stochastic batch.
if self.step > self.nb_steps_warmup and self.step % self.train_interval == 0:
experiences = self.memory.sample(self.batch_size)
assert len(experiences) == self.batch_size
# Start by extracting the necessary parameters (we use a vectorized implementation).
state0_batch = []
reward_batch = []
action_batch = []
terminal1_batch = []
state1_batch = []
for e in experiences:
state0_batch.append(e.state0)
state1_batch.append(e.state1)
reward_batch.append(e.reward)
action_batch.append(e.action)
terminal1_batch.append(0. if e.terminal1 else 1.)
# Prepare and validate parameters.
state0_batch = self.process_state_batch(state0_batch)
state1_batch = self.process_state_batch(state1_batch)
terminal1_batch = np.array(terminal1_batch)
reward_batch = np.array(reward_batch)
assert reward_batch.shape == (self.batch_size,)
assert terminal1_batch.shape == reward_batch.shape
assert len(action_batch) == len(reward_batch)
# Compute Q values for mini-batch update.
if self.enable_double_dqn:
# According to the paper "Deep Reinforcement Learning with Double Q-learning"
# (van Hasselt et al., 2015), in Double DQN, the online network predicts the actions
# while the target network is used to estimate the Q value.
q_values = self.model.predict_on_batch(state1_batch)
assert q_values.shape == (self.batch_size, self.nb_actions)
actions = np.argmax(q_values, axis=1)
assert actions.shape == (self.batch_size,)
# Now, estimate Q values using the target network but select the values with the
# highest Q value wrt to the online model (as computed above).
target_q_values = self.target_model.predict_on_batch(state1_batch)
assert target_q_values.shape == (self.batch_size, self.nb_actions)
q_batch = target_q_values[range(self.batch_size), actions]
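# Small example of the decoupling: if the online network scores s_t+1 as
# [1.2, 3.4, 2.0] it selects action 1, but the value that is bootstrapped is the
# target network's estimate for action 1, which may differ (say 2.9 rather than 3.4).
# This is what reduces the upward bias of max-based targets.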
else:
# Compute the q_values given state1, and extract the maximum for each sample in the batch.
# We perform this prediction on the target_model instead of the model for reasons
# outlined in Mnih (2015). In short: it makes the algorithm more stable.
target_q_values = self.target_model.predict_on_batch(state1_batch)
assert target_q_values.shape == (self.batch_size, self.nb_actions)
q_batch = np.max(target_q_values, axis=1).flatten()
assert q_batch.shape == (self.batch_size,)
targets = np.zeros((self.batch_size, self.nb_actions))
dummy_targets = np.zeros((self.batch_size,))
masks = np.zeros((self.batch_size, self.nb_actions))
# Compute r_t + gamma * max_a Q(s_t+1, a) and update the target targets accordingly,
# but only for the affected output units (as given by action_batch).
discounted_reward_batch = self.gamma * q_batch
# Set discounted reward to zero for all states that were terminal.
discounted_reward_batch *= terminal1_batch
assert discounted_reward_batch.shape == reward_batch.shape
Rs = reward_batch + discounted_reward_batch
for idx, (target, mask, R, action) in enumerate(zip(targets, masks, Rs, action_batch)):
target[action] = R # update action with estimated accumulated reward
dummy_targets[idx] = R
mask[action] = 1. # enable loss for this specific action
targets = np.array(targets).astype('float32')
masks = np.array(masks).astype('float32')
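# For instance, with nb_actions = 3, action = 1 and R = 10.9, the corresponding rows
# become targets = [0.0, 10.9, 0.0] and mask = [0.0, 1.0, 0.0], so the masked Huber
# loss only penalises the Q value of the action that was actually taken.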
# Finally, perform a single update on the entire batch. We use a dummy target since
# the actual loss is computed in a Lambda layer that needs more complex input. However,
# it is still useful to know the actual target to compute metrics properly.
ins = [state0_batch] if type(self.model.input) is not list else state0_batch
metrics = self.trainable_model.train_on_batch(ins + [targets, masks], [dummy_targets, targets])
metrics = [metric for idx, metric in enumerate(metrics) if idx not in (1, 2)] # throw away individual losses
metrics += self.policy.metrics
if self.processor is not None:
metrics += self.processor.metrics
if self.target_model_update >= 1 and self.step % self.target_model_update == 0:
self.update_target_model_hard()
return metrics
@property
def layers(self):
return self.model.layers[:]
@property
def metrics_names(self):
# Throw away individual losses and replace output name since this is hidden from the user.
assert len(self.trainable_model.output_names) == 2
dummy_output_name = self.trainable_model.output_names[1]
model_metrics = [name for idx, name in enumerate(self.trainable_model.metrics_names) if idx not in (1, 2)]
model_metrics = [name.replace(dummy_output_name + '_', '') for name in model_metrics]
names = model_metrics + self.policy.metrics_names[:]
if self.processor is not None:
names += self.processor.metrics_names[:]
return names
@property
def policy(self):
return self.__policy
@policy.setter
def policy(self, policy):
self.__policy = policy
self.__policy._set_agent(self)
@property
def test_policy(self):
return self.__test_policy
@test_policy.setter
def test_policy(self, policy):
self.__test_policy = policy
self.__test_policy._set_agent(self)
class NAFLayer(Layer):
"""Write me
"""
def __init__(self, nb_actions, mode='full', **kwargs):
if mode not in ('full', 'diag'):
raise RuntimeError('Unknown mode "{}" in NAFLayer.'.format(mode))  # self.mode is not assigned yet at this point
self.nb_actions = nb_actions
self.mode = mode
super(NAFLayer, self).__init__(**kwargs)
def call(self, x, mask=None):
# TODO: validate input shape
assert (len(x) == 3)
L_flat = x[0]
mu = x[1]
a = x[2]
if self.mode == 'full':
# Create L and L^T matrix, which we use to construct the positive-definite matrix P.
L = None
LT = None
if K.backend() == 'theano':
import theano.tensor as T
import theano
def fn(x, L_acc, LT_acc):
x_ = K.zeros((self.nb_actions, self.nb_actions))
x_ = T.set_subtensor(x_[np.tril_indices(self.nb_actions)], x)
diag = K.exp(T.diag(x_)) + K.epsilon()
x_ = T.set_subtensor(x_[np.diag_indices(self.nb_actions)], diag)
return x_, x_.T
outputs_info = [
K.zeros((self.nb_actions, self.nb_actions)),
K.zeros((self.nb_actions, self.nb_actions)),
]
results, _ = theano.scan(fn=fn, sequences=L_flat, outputs_info=outputs_info)
L, LT = results
elif K.backend() == 'tensorflow':
import tensorflow as tf
# Number of elements in a triangular matrix.
nb_elems = (self.nb_actions * self.nb_actions + self.nb_actions) // 2
# Create mask for the diagonal elements in L_flat. This is used to exponentiate
# only the diagonal elements, which is done before gathering.
diag_indeces = [0]
for row in range(1, self.nb_actions):
diag_indeces.append(diag_indeces[-1] + (row + 1))
diag_mask = np.zeros(1 + nb_elems) # +1 for the leading zero
diag_mask[np.array(diag_indeces) + 1] = 1
diag_mask = K.variable(diag_mask)
# Add leading zero element to each element in the L_flat. We use this zero
# element when gathering L_flat into a lower triangular matrix L.
nb_rows = tf.shape(L_flat)[0]
zeros = tf.expand_dims(tf.tile(K.zeros((1,)), [nb_rows]), 1)
try:
# Old TF behavior.
L_flat = tf.concat(1, [zeros, L_flat])
except TypeError:
# New TF behavior
L_flat = tf.concat([zeros, L_flat], 1)
# Create mask that can be used to gather elements from L_flat and put them
# into a lower triangular matrix.
tril_mask = np.zeros((self.nb_actions, self.nb_actions), dtype='int32')
tril_mask[np.tril_indices(self.nb_actions)] = range(1, nb_elems + 1)
# Finally, process each element of the batch.
init = [
K.zeros((self.nb_actions, self.nb_actions)),
K.zeros((self.nb_actions, self.nb_actions)),
]
def fn(a, x):
# Exponentiate everything. This is much easier than only exponentiating
# the diagonal elements, and the action space is usually relatively low-dimensional.
x_ = K.exp(x) + K.epsilon()
# Only keep the diagonal elements.
x_ *= diag_mask
# Add the original, non-diagonal elements.
x_ += x * (1. - diag_mask)
# Finally, gather everything into a lower triangular matrix.
L_ = tf.gather(x_, tril_mask)
return [L_, tf.transpose(L_)]
tmp = tf.scan(fn, L_flat, initializer=init)
if isinstance(tmp, (list, tuple)):
# TensorFlow 0.10 now returns a tuple of tensors.
L, LT = tmp
else:
# Old TensorFlow < 0.10 returns a shared tensor.
L = tmp[:, 0, :, :]
LT = tmp[:, 1, :, :]
else:
raise RuntimeError('Unknown Keras backend "{}".'.format(K.backend()))
assert L is not None
assert LT is not None
P = K.batch_dot(L, LT)
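# P = L * L^T is a Cholesky-style parameterisation: because the diagonal of L is
# exponentiated (hence strictly positive), P is symmetric positive-definite, which
# makes the quadratic advantage term computed below always <= 0.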
elif self.mode == 'diag':
if K.backend() == 'theano':
import theano.tensor as T
import theano
def fn(x, P_acc):
x_ = K.zeros((self.nb_actions, self.nb_actions))
x_ = T.set_subtensor(x_[np.diag_indices(self.nb_actions)], x)
return x_
outputs_info = [
K.zeros((self.nb_actions, self.nb_actions)),
]
P, _ = theano.scan(fn=fn, sequences=L_flat, outputs_info=outputs_info)
elif K.backend() == 'tensorflow':
import tensorflow as tf
# Create mask that can be used to gather elements from L_flat and put them
# into a diagonal matrix.
diag_mask = np.zeros((self.nb_actions, self.nb_actions), dtype='int32')
diag_mask[np.diag_indices(self.nb_actions)] = range(1, self.nb_actions + 1)
# Add leading zero element to each element in the L_flat. We use this zero
# element when gathering L_flat into a lower triangular matrix L.
nb_rows = tf.shape(L_flat)[0]
zeros = tf.expand_dims(tf.tile(K.zeros((1,)), [nb_rows]), 1)
try:
# Old TF behavior.
L_flat = tf.concat(1, [zeros, L_flat])
except TypeError:
# New TF behavior
L_flat = tf.concat([zeros, L_flat], 1)
# Finally, process each element of the batch.
def fn(a, x):
x_ = tf.gather(x, diag_mask)
return x_
P = tf.scan(fn, L_flat, initializer=K.zeros((self.nb_actions, self.nb_actions)))
else:
raise RuntimeError('Unknown Keras backend "{}".'.format(K.backend()))
assert P is not None
assert K.ndim(P) == 3
# Combine a, mu and P into a scalar (over the batches). What we compute here is
# -.5 * (a - mu)^T * P * (a - mu), where * denotes the dot-product. Unfortunately
# TensorFlow handles vector * P slightly suboptimal, hence we convert the vectors to
# 1xd/dx1 matrices and finally flatten the resulting 1x1 matrix into a scalar. All
# operations happen over the batch size, which is dimension 0.
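# One-dimensional sanity check: with nb_actions = 1, mu = 0.3, a = 0.5 and P = 4.0,
# A = -0.5 * (0.5 - 0.3) * 4.0 * (0.5 - 0.3) = -0.08, and A reaches its maximum of 0
# exactly at a = mu.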
prod = K.batch_dot(K.expand_dims(a - mu, 1), P)
prod = K.batch_dot(prod, K.expand_dims(a - mu, -1))
A = -.5 * K.batch_flatten(prod)
assert K.ndim(A) == 2
return A
def get_output_shape_for(self, input_shape):
return self.compute_output_shape(input_shape)
def compute_output_shape(self, input_shape):
if len(input_shape) != 3:
raise RuntimeError("Expects 3 inputs: L, mu, a")
for i, shape in enumerate(input_shape):
if len(shape) != 2:
raise RuntimeError("Input {} has {} dimensions but should have 2".format(i, len(shape)))
assert self.mode in ('full','diag')
if self.mode == 'full':
expected_elements = (self.nb_actions * self.nb_actions + self.nb_actions) // 2
elif self.mode == 'diag':
expected_elements = self.nb_actions
else:
expected_elements = None
assert expected_elements is not None
if input_shape[0][1] != expected_elements:
raise RuntimeError("Input 0 (L) should have {} elements but has {}".format(input_shape[0][1]))
if input_shape[1][1] != self.nb_actions:
raise RuntimeError(
"Input 1 (mu) should have {} elements but has {}".format(self.nb_actions, input_shape[1][1]))
if input_shape[2][1] != self.nb_actions:
raise RuntimeError(
"Input 2 (action) should have {} elements but has {}".format(self.nb_actions, input_shape[1][1]))
return input_shape[0][0], 1
class NAFAgent(AbstractDQNAgent):
"""Write me
"""
def __init__(self, V_model, L_model, mu_model, random_process=None,
covariance_mode='full', *args, **kwargs):
super(NAFAgent, self).__init__(*args, **kwargs)
# TODO: Validate (important) input.
# Parameters.
self.random_process = random_process
self.covariance_mode = covariance_mode
# Related objects.
self.V_model = V_model
self.L_model = L_model
self.mu_model = mu_model
# State.
self.reset_states()
def update_target_model_hard(self):
self.target_V_model.set_weights(self.V_model.get_weights())
def load_weights(self, filepath):
self.combined_model.load_weights(filepath) # updates V, L and mu model since the weights are shared
self.update_target_model_hard()
def save_weights(self, filepath, overwrite=False):
self.combined_model.save_weights(filepath, overwrite=overwrite)
def reset_states(self):
if self.random_process is not None:
self.random_process.reset_states()
self.recent_action = None
self.recent_observation = None
if self.compiled:
self.combined_model.reset_states()
self.target_V_model.reset_states()
def compile(self, optimizer, metrics=[]):
metrics += [mean_q] # register default metrics
# Create target V model. We don't need targets for mu or L.
self.target_V_model = clone_model(self.V_model, self.custom_model_objects)
self.target_V_model.compile(optimizer='sgd', loss='mse')
# Build combined model.
a_in = Input(shape=(self.nb_actions,), name='action_input')
if type(self.V_model.input) is list:
observation_shapes = [i._keras_shape[1:] for i in self.V_model.input]
else:
observation_shapes = [self.V_model.input._keras_shape[1:]]
os_in = [Input(shape=shape, name='observation_input_{}'.format(idx)) for idx, shape in enumerate(observation_shapes)]
L_out = self.L_model([a_in] + os_in)
V_out = self.V_model(os_in)
mu_out = self.mu_model(os_in)
A_out = NAFLayer(self.nb_actions, mode=self.covariance_mode)([L_out, mu_out, a_in])
combined_out = Lambda(lambda x: x[0]+x[1], output_shape=lambda x: x[0])([A_out, V_out])
combined = Model(inputs=[a_in] + os_in, outputs=[combined_out])
# Compile combined model.
if self.target_model_update < 1.:
# We use the `AdditionalUpdatesOptimizer` to efficiently soft-update the target model.
updates = get_soft_target_model_updates(self.target_V_model, self.V_model, self.target_model_update)
optimizer = AdditionalUpdatesOptimizer(optimizer, updates)
def clipped_error(y_true, y_pred):
return K.mean(huber_loss(y_true, y_pred, self.delta_clip), axis=-1)
combined.compile(loss=clipped_error, optimizer=optimizer, metrics=metrics)
self.combined_model = combined
self.compiled = True
def select_action(self, state):
batch = self.process_state_batch([state])
action = self.mu_model.predict_on_batch(batch).flatten()
assert action.shape == (self.nb_actions,)
# Apply noise, if a random process is set.
if self.training and self.random_process is not None:
noise = self.random_process.sample()
assert noise.shape == action.shape
action += noise
return action
def forward(self, observation):
# Select an action.
state = self.memory.get_recent_state(observation)
action = self.select_action(state)
# Book-keeping.
self.recent_observation = observation
self.recent_action = action
return action
def backward(self, reward, terminal):
# Store most recent experience in memory.
if self.step % self.memory_interval == 0:
self.memory.append(self.recent_observation, self.recent_action, reward, terminal,
training=self.training)
metrics = [np.nan for _ in self.metrics_names]
if not self.training:
# We're done here. No need to update the experience memory since we only use the working
# memory to obtain the state over the most recent observations.
return metrics
# Train the network on a single stochastic batch.
if self.step > self.nb_steps_warmup and self.step % self.train_interval == 0:
experiences = self.memory.sample(self.batch_size)
assert len(experiences) == self.batch_size
# Start by extracting the necessary parameters (we use a vectorized implementation).
state0_batch = []
reward_batch = []
action_batch = []
terminal1_batch = []
state1_batch = []
for e in experiences:
state0_batch.append(e.state0)
state1_batch.append(e.state1)
reward_batch.append(e.reward)
action_batch.append(e.action)
terminal1_batch.append(0. if e.terminal1 else 1.)
# Prepare and validate parameters.
state0_batch = self.process_state_batch(state0_batch)
state1_batch = self.process_state_batch(state1_batch)
terminal1_batch = np.array(terminal1_batch)
reward_batch = np.array(reward_batch)
action_batch = np.array(action_batch)
assert reward_batch.shape == (self.batch_size,)
assert terminal1_batch.shape == reward_batch.shape
assert action_batch.shape == (self.batch_size, self.nb_actions)
# Compute Q values for mini-batch update.
q_batch = self.target_V_model.predict_on_batch(state1_batch).flatten()
assert q_batch.shape == (self.batch_size,)
# Compute discounted reward.
discounted_reward_batch = self.gamma * q_batch
# Set discounted reward to zero for all states that were terminal.
discounted_reward_batch *= terminal1_batch
assert discounted_reward_batch.shape == reward_batch.shape
Rs = reward_batch + discounted_reward_batch
assert Rs.shape == (self.batch_size,)
# Finally, perform a single update on the entire batch.
if len(self.combined_model.input) == 2:
metrics = self.combined_model.train_on_batch([action_batch, state0_batch], Rs)
else:
metrics = self.combined_model.train_on_batch([action_batch] + state0_batch, Rs)
if self.processor is not None:
metrics += self.processor.metrics
if self.target_model_update >= 1 and self.step % self.target_model_update == 0:
self.update_target_model_hard()
return metrics
@property
def layers(self):
return self.combined_model.layers[:]
def get_config(self):
config = super(NAFAgent, self).get_config()
config['V_model'] = get_object_config(self.V_model)
config['mu_model'] = get_object_config(self.mu_model)
config['L_model'] = get_object_config(self.L_model)
if self.compiled:
config['target_V_model'] = get_object_config(self.target_V_model)
return config
@property
def metrics_names(self):
names = self.combined_model.metrics_names[:]
if self.processor is not None:
names += self.processor.metrics_names[:]
return names
# Aliases
ContinuousDQNAgent = NAFAgent
| 33,631 | 44.204301 | 250 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-control/rl/agents/__init__.py
|
from __future__ import absolute_import
from .dqn import DQNAgent, NAFAgent, ContinuousDQNAgent
from .ddpg import DDPGAgent
from .cem import CEMAgent
from .sarsa import SarsaAgent, SARSAAgent
| 191 | 31 | 55 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-control/rl/agents/cem.py
|
from __future__ import division
from collections import deque
from copy import deepcopy
import numpy as np
import keras.backend as K
from keras.models import Model
from rl.core import Agent
from rl.util import *
class CEMAgent(Agent):
"""Write me
"""
def __init__(self, model, nb_actions, memory, batch_size=50, nb_steps_warmup=1000,
train_interval=50, elite_frac=0.05, memory_interval=1, theta_init=None,
noise_decay_const=0.0, noise_ampl=0.0, **kwargs):
super(CEMAgent, self).__init__(**kwargs)
# Parameters.
self.nb_actions = nb_actions
self.batch_size = batch_size
self.elite_frac = elite_frac
self.num_best = int(self.batch_size * self.elite_frac)
self.nb_steps_warmup = nb_steps_warmup
self.train_interval = train_interval
self.memory_interval = memory_interval
# if using noisy CEM, the minimum standard deviation will be ampl * exp (- decay_const * step )
self.noise_decay_const = noise_decay_const
self.noise_ampl = noise_ampl
# default initial mean & standard deviation; override these by passing a theta_init argument
self.init_mean = 0.0
self.init_stdev = 1.0
# Related objects.
self.memory = memory
self.model = model
self.shapes = [w.shape for w in model.get_weights()]
self.sizes = [w.size for w in model.get_weights()]
self.num_weights = sum(self.sizes)
# store the best result seen during training, as a tuple (reward, flat_weights)
self.best_seen = (-np.inf, np.zeros(self.num_weights))
self.theta = np.zeros(self.num_weights*2)
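# theta packs the sampling distribution as [means | stdevs]: the first num_weights
# entries are per-weight means and the remaining num_weights entries are per-weight
# standard deviations of a diagonal Gaussian over the flattened model weights.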
self.update_theta(theta_init)
# State.
self.episode = 0
self.compiled = False
self.reset_states()
def compile(self):
self.model.compile(optimizer='sgd', loss='mse')
self.compiled = True
def load_weights(self, filepath):
self.model.load_weights(filepath)
def save_weights(self, filepath, overwrite=False):
self.model.save_weights(filepath, overwrite=overwrite)
def get_weights_flat(self,weights):
weights_flat = np.zeros(self.num_weights)
pos = 0
for i_layer, size in enumerate(self.sizes):
weights_flat[pos:pos+size] = weights[i_layer].flatten()
pos += size
return weights_flat
def get_weights_list(self,weights_flat):
weights = []
pos = 0
for i_layer, size in enumerate(self.sizes):
arr = weights_flat[pos:pos+size].reshape(self.shapes[i_layer])
weights.append(arr)
pos += size
return weights
def reset_states(self):
self.recent_observation = None
self.recent_action = None
def select_action(self, state, stochastic=False):
batch = np.array([state])
if self.processor is not None:
batch = self.processor.process_state_batch(batch)
action = self.model.predict_on_batch(batch).flatten()
if stochastic or self.training:
return np.random.choice(np.arange(self.nb_actions), p=np.exp(action) / np.sum(np.exp(action)))
return np.argmax(action)
def update_theta(self,theta):
if (theta is not None):
assert theta.shape == self.theta.shape, "Invalid theta, shape is {0} but should be {1}".format(theta.shape,self.theta.shape)
assert (not np.isnan(theta).any()), "Invalid theta, NaN encountered"
assert (theta[self.num_weights:] >= 0.).all(), "Invalid theta, standard deviations must be nonnegative"
self.theta = theta
else:
means = np.ones(self.num_weights) * self.init_mean
stdevs = np.ones(self.num_weights) * self.init_stdev
self.theta = np.hstack((means,stdevs))
def choose_weights(self):
mean = self.theta[:self.num_weights]
std = self.theta[self.num_weights:]
weights_flat = std * np.random.randn(self.num_weights) + mean
sampled_weights = self.get_weights_list(weights_flat)
self.model.set_weights(sampled_weights)
def forward(self, observation):
# Select an action.
state = self.memory.get_recent_state(observation)
action = self.select_action(state)
# Book-keeping.
self.recent_observation = observation
self.recent_action = action
return action
@property
def layers(self):
return self.model.layers[:]
def backward(self, reward, terminal):
# Store most recent experience in memory.
if self.step % self.memory_interval == 0:
self.memory.append(self.recent_observation, self.recent_action, reward, terminal,
training=self.training)
metrics = [np.nan for _ in self.metrics_names]
if not self.training:
# We're done here. No need to update the experience memory since we only use the working
# memory to obtain the state over the most recent observations.
return metrics
if terminal:
params = self.get_weights_flat(self.model.get_weights())
self.memory.finalize_episode(params)
if self.step > self.nb_steps_warmup and self.episode % self.train_interval == 0:
params, reward_totals = self.memory.sample(self.batch_size)
best_idx = np.argsort(np.array(reward_totals))[-self.num_best:]
best = np.vstack([params[i] for i in best_idx])
if reward_totals[best_idx[-1]] > self.best_seen[0]:
self.best_seen = (reward_totals[best_idx[-1]], params[best_idx[-1]])
metrics = [np.mean(np.array(reward_totals)[best_idx])]
if self.processor is not None:
metrics += self.processor.metrics
min_std = self.noise_ampl * np.exp(-self.step * self.noise_decay_const)
mean = np.mean(best, axis=0)
std = np.std(best, axis=0) + min_std
new_theta = np.hstack((mean, std))
self.update_theta(new_theta)
self.choose_weights()
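# This is the core cross-entropy step: keep the elite_frac best weight samples by
# episode return, refit the diagonal Gaussian to them (with a decaying additive
# noise term min_std to avoid premature collapse), then sample fresh weights.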
self.episode += 1
return metrics
def _on_train_end(self):
self.model.set_weights(self.get_weights_list(self.best_seen[1]))
@property
def metrics_names(self):
names = ['mean_best_reward']
if self.processor is not None:
names += self.processor.metrics_names[:]
return names
| 6,679 | 36.740113 | 136 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/setup.py
|
from setuptools import setup, find_packages
import sys
if sys.version_info.major != 3:
print('This Python is only compatible with Python 3, but you are running '
'Python {}. The installation will likely fail.'.format(sys.version_info.major))
setup(name='baselines',
packages=[package for package in find_packages()
if package.startswith('baselines')],
install_requires=[
'gym[atari,classic_control]',
'scipy',
'tqdm',
'joblib',
'dill',
'progressbar2',
'mpi4py',
'cloudpickle',
'tensorflow-gpu==1.10.0',
'click',
'opencv-python',
],
extras_require={
'test': [
'filelock',
'pytest'
]
},
description='OpenAI baselines: high quality implementations of reinforcement learning algorithms',
author='OpenAI',
url='https://github.com/openai/baselines',
author_email='[email protected]',
version='0.1.5')
| 1,035 | 27.777778 | 104 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/results_plotter.py
|
import numpy as np
import matplotlib
matplotlib.use('TkAgg') # Can change to 'Agg' for non-interactive mode
import matplotlib.pyplot as plt
plt.rcParams['svg.fonttype'] = 'none'
from baselines.bench.monitor import load_results
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink',
'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'lightpurple', 'darkred', 'darkblue']
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
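# Shape example: for a of shape (1000,) and window = 100, the strided view has shape
# (901, 100); window_func below reduces each row (e.g. with np.mean) to produce a
# smoothed curve aligned with x[window-1:].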
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
def ts2xy(ts, xaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
y = ts.r.values
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
y = ts.r.values
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
y = ts.r.values
else:
raise NotImplementedError
return x, y
def plot_curves(xy_list, xaxis, title):
plt.figure(figsize=(8,2))
maxx = max(xy[0][-1] for xy in xy_list)
minx = 0
for (i, (x, y)) in enumerate(xy_list):
color = COLORS[i]
plt.scatter(x, y, s=2)
x, y_mean = window_func(x, y, EPISODES_WINDOW, np.mean) #So returns average of last EPISODE_WINDOW episodes
plt.plot(x, y_mean, color=color)
plt.xlim(minx, maxx)
plt.title(title)
plt.xlabel(xaxis)
plt.ylabel("Episode Rewards")
plt.tight_layout()
def plot_results(dirs, num_timesteps, xaxis, task_name):
tslist = []
for dir in dirs:
ts = load_results(dir)
ts = ts[ts.l.cumsum() <= num_timesteps]
tslist.append(ts)
xy_list = [ts2xy(ts, xaxis) for ts in tslist]
plot_curves(xy_list, xaxis, task_name)
# Example usage in jupyter-notebook
# from baselines import log_viewer
# %matplotlib inline
# log_viewer.plot_results(["./log"], 10e6, log_viewer.X_TIMESTEPS, "Breakout")
# Here ./log is a directory containing the monitor.csv files
def main():
import argparse
import os
import glob
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--log_dir', help='Path of log directory', default='logs')
parser.add_argument('--num_timesteps', type=int, default=int(10e6))
parser.add_argument('--xaxis', help = 'Variable on X-axis', default = X_TIMESTEPS)
parser.add_argument('--task_name', help = 'Title of plot', default = 'PongNoFrameskip-v4')
parser.add_argument('--weight', help = 'Weight of noise', default = 0.2, type=float)
parser.add_argument('--save_dir', help = 'Directory of output plots', default = 'results')
args = parser.parse_args()
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
dirs = glob.glob(os.path.join(args.log_dir, "*"))
dirs = sorted(dirs)  # sorted() returns a new list, so the result must be kept
cnt = 0
for directory in dirs:
print(directory)
with open(os.path.join(directory, "setting.txt"), "r") as f:
line = f.readlines()[-1].rstrip()
print (line.split())
normal = line.split()[1][0:-1].split(',')[0]
weight = float(line.split()[3][0:-1].split(',')[0])
surrogate = line.split()[5][0:-1].split(',')[0]
noise_type = line.split()[7][0:-1].split(')')[0]
print (normal, weight, surrogate, noise_type)
if normal == 'True':
title = args.task_name + " (normal)"
elif surrogate == 'False':
title = args.task_name + " (noisy-" + str(weight) + "-" + noise_type + ")"
else:
title = args.task_name + " (surrogate-" + str(weight) + "-" + noise_type + ")"
print (weight, args.weight)
if weight == args.weight:
print (args.weight)
plot_results([directory], args.num_timesteps, args.xaxis, title)
plt.savefig(os.path.join(args.save_dir, title + ".png"))
cnt += 1
print(cnt)
if __name__ == '__main__':
main()
| 4,381 | 35.516667 | 115 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/logger.py
|
import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
from collections import defaultdict
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, 'wt')
self.own_file = True
else:
assert hasattr(filename_or_file, 'read'), 'expected file or str, got %s'%filename_or_file
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if isinstance(val, float):
valstr = '%-8.3g' % (val,)
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print('WARNING: tried to write empty key-value dict')
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = '-' * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items()):
lines.append('| %s%s | %s%s |' % (
key,
' ' * (keywidth - len(key)),
val,
' ' * (valwidth - len(val)),
))
lines.append(dashes)
self.file.write('\n'.join(lines) + '\n')
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
return s[:20] + '...' if len(s) > 23 else s
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1: # add space unless this is the last one
self.file.write(' ')
self.file.write('\n')
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'wt')
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, 'dtype'):
v = v.tolist()
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + '\n')
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'w+t')
self.keys = []
self.sep = ','
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = kvs.keys() - self.keys
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
self.file.write(k)
self.file.write('\n')
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write('\n')
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write('\n')
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = 'events'
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {'tag': k, 'simple_value': float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = self.step # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir, log_suffix=''):
os.makedirs(ev_dir, exist_ok=True)
if format == 'stdout':
return HumanOutputFormat(sys.stdout)
elif format == 'log':
return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % log_suffix))
elif format == 'json':
return JSONOutputFormat(osp.join(ev_dir, 'progress%s.json' % log_suffix))
elif format == 'csv':
return CSVOutputFormat(osp.join(ev_dir, 'progress%s.csv' % log_suffix))
elif format == 'tensorboard':
return TensorBoardOutputFormat(osp.join(ev_dir, 'tb%s' % log_suffix))
else:
raise ValueError('Unknown format specified: %s' % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
"""
Logger.CURRENT.logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, values averaged.
"""
Logger.CURRENT.logkv_mean(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
level: int. (see logger.py docs) If the global logger level is higher than
the level argument here, don't print to stdout.
"""
Logger.CURRENT.dumpkvs()
def getkvs():
return Logger.CURRENT.name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
Logger.CURRENT.log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
Logger.CURRENT.set_level(level)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return Logger.CURRENT.get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
class ProfileKV:
"""
Usage:
with logger.ProfileKV("interesting_scope"):
code
"""
def __init__(self, n):
self.n = "wait_" + n
def __enter__(self):
self.t1 = time.time()
def __exit__(self ,type, value, traceback):
Logger.CURRENT.name2val[self.n] += time.time() - self.t1
def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with ProfileKV(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
if val is None:
self.name2val[key] = None
return
oldval, cnt = self.name2val[key], self.name2cnt[key]
self.name2val[key] = oldval*cnt/(cnt+1) + val/(cnt+1)
self.name2cnt[key] = cnt + 1
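# Running average: e.g. logkv_mean('b', -22.5) followed by logkv_mean('b', -44.4)
# stores (-22.5 - 44.4) / 2 = -33.45, which is what dumpkvs() will then report.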
def dumpkvs(self):
if self.level == DISABLED: return
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(self.name2val)
self.name2val.clear()
self.name2cnt.clear()
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
Logger.DEFAULT = Logger.CURRENT = Logger(dir=None, output_formats=[HumanOutputFormat(sys.stdout)])
def configure(dir=None, format_strs=None, env_name="PongNoFrameskip-v4", normal=True):
# if dir is None:
# dir = os.getenv('OPENAI_LOGDIR')
if dir is None:
# dir = osp.join("tempfile.gettempdir()",
if normal:
dir = osp.join("logs-normal", env_name.split("No")[0].lower(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"))
else:
dir = osp.join("logs-" + env_name.split("No")[0].lower(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"))
assert isinstance(dir, str)
os.makedirs(dir, exist_ok=True)
log_suffix = ''
from mpi4py import MPI
rank = MPI.COMM_WORLD.Get_rank()
if rank > 0:
log_suffix = "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv('OPENAI_LOG_FORMAT', 'stdout,log,csv').split(',')
else:
format_strs = os.getenv('OPENAI_LOG_FORMAT_MPI', 'log').split(',')
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats)
log('Logging to %s'%dir)
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log('Reset logger')
class scoped_configure(object):
def __init__(self, dir=None, format_strs=None):
self.dir = dir
self.format_strs = format_strs
self.prevlogger = None
def __enter__(self):
self.prevlogger = Logger.CURRENT
configure(dir=self.dir, format_strs=self.format_strs)
def __exit__(self, *args):
Logger.CURRENT.close()
Logger.CURRENT = self.prevlogger
# ================================================================
def _demo():
info("hi")
debug("shouldn't appear")
set_level(DEBUG)
debug("should appear")
dir = "/tmp/testlogging"
if os.path.exists(dir):
shutil.rmtree(dir)
configure(dir=dir)
logkv("a", 3)
logkv("b", 2.5)
dumpkvs()
logkv("b", -2.5)
logkv("a", 5.5)
dumpkvs()
info("^^^ should see a = 5.5")
logkv_mean("b", -22.5)
logkv_mean("b", -44.4)
logkv("a", 5.5)
dumpkvs()
info("^^^ should see b = 33.3")
logkv("b", -2.5)
dumpkvs()
logkv("a", "longasslongasslongasslongasslongasslongassvalue")
dumpkvs()
# ================================================================
# Readers
# ================================================================
def read_json(fname):
import pandas
ds = []
with open(fname, 'rt') as fh:
for line in fh:
ds.append(json.loads(line))
return pandas.DataFrame(ds)
def read_csv(fname):
import pandas
return pandas.read_csv(fname, index_col=None, comment='#')
def read_tb(path):
"""
path : a tensorboard file OR a directory, where we will find all TB files
of the form events.*
"""
import pandas
import numpy as np
from glob import glob
from collections import defaultdict
import tensorflow as tf
if osp.isdir(path):
fnames = glob(osp.join(path, "events.*"))
elif osp.basename(path).startswith("events."):
fnames = [path]
else:
raise NotImplementedError("Expected tensorboard file or directory containing them. Got %s"%path)
tag2pairs = defaultdict(list)
maxstep = 0
for fname in fnames:
for summary in tf.train.summary_iterator(fname):
if summary.step > 0:
for v in summary.summary.value:
pair = (summary.step, v.simple_value)
tag2pairs[v.tag].append(pair)
maxstep = max(summary.step, maxstep)
data = np.empty((maxstep, len(tag2pairs)))
data[:] = np.nan
tags = sorted(tag2pairs.keys())
for (colidx,tag) in enumerate(tags):
pairs = tag2pairs[tag]
for (step, value) in pairs:
data[step-1, colidx] = value
return pandas.DataFrame(data, columns=tags)
if __name__ == "__main__":
_demo()
| 14,390 | 28.918919 | 122 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/results_compare.py
|
import numpy as np
import matplotlib
matplotlib.use('TkAgg') # Can change to 'Agg' for non-interactive mode
import matplotlib.pyplot as plt
plt.rcParams['svg.fonttype'] = 'none'
from baselines.bench.monitor import load_results
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink',
'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'lightpurple', 'darkred', 'darkblue']
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
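# Minimal sketch (added for illustration, not in the original file): window_func
# smooths episode rewards with a length-`window` rolling statistic and trims x so
# both arrays stay aligned. The numbers below are made up.
def _example_window_func():
    x = np.arange(10)                       # e.g. episode indices
    y = np.arange(10, dtype=float)          # e.g. episode rewards
    xs, ys = window_func(x, y, 3, np.mean)  # rolling mean over 3 episodes
    # xs is x[2:]; ys[0] == mean(y[0:3]) == 1.0
    return xs, ys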
def ts2xy(ts, xaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
y = ts.r.values
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
y = ts.r.values
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
y = ts.r.values
else:
raise NotImplementedError
return x, y
def plot_curves(xy_list, xaxis, title):
plt.figure(figsize=(8,2))
maxx = max(xy[0][-1] for xy in xy_list)
minx = 0
for (i, (x, y)) in enumerate(xy_list):
color = COLORS[i]
plt.scatter(x, y, s=2)
x, y_mean = window_func(x, y, EPISODES_WINDOW, np.mean) #So returns average of last EPISODE_WINDOW episodes
plt.plot(x, y_mean, color=color)
plt.xlim(minx, maxx)
plt.title(title)
plt.xlabel(xaxis)
plt.ylabel("Episode Rewards")
plt.tight_layout()
def plot_curves_fancy(xy_list, xaxis, title):
import seaborn as sns
sns.set()
sns.set_color_codes()
plt.figure()
# maxx = max(xy[0][-1] for xy in xy_list)
# minx = 0
for (i, (x, y)) in enumerate(xy_list):
plt.plot(x, y, alpha=0.4, linewidth=0.8, c=sns.color_palette()[i])
x, y_mean = window_func(x, y, EPISODES_WINDOW, np.mean) #So returns average of last EPISODE_WINDOW episodes
plt.plot(x, y_mean, linewidth=0.8, c=sns.color_palette()[i])
# plt.xlim(minx, maxx)
plt.title(title)
plt.xlabel(xaxis)
plt.ylabel("Episode Rewards")
plt.tight_layout()
def plot_results(dirs, num_timesteps, xaxis, task_name):
tslist = []
for dir in dirs:
ts = load_results(dir)
ts = ts[ts.l.cumsum() <= num_timesteps]
tslist.append(ts)
xy_list = [ts2xy(ts, xaxis) for ts in tslist]
plot_curves_fancy(xy_list, xaxis, task_name)
def plot_results_compare(dirs, num_timesteps, xaxis, title):
import seaborn as sns
sns.set()
sns.set_color_codes()
ts = load_results(dirs["noisy"])
ts = ts[ts.l.cumsum() <= num_timesteps]
xy_list = ts2xy(ts, xaxis)
x = xy_list[0]
y = xy_list[1]
plt.plot(x, y, alpha=0.4, linewidth=0.8, c=sns.color_palette()[1])
x, y_mean = window_func(x, y, EPISODES_WINDOW, np.mean) #So returns average of last EPISODE_WINDOW episodes
plt.plot(x, y_mean, linewidth=0.8, c=sns.color_palette()[1], label='noisy')
ts = load_results(dirs["surrogate"])
ts = ts[ts.l.cumsum() <= num_timesteps]
xy_list = ts2xy(ts, xaxis)
x = xy_list[0]
y = xy_list[1]
plt.plot(x, y, alpha=0.4, linewidth=0.8, c=sns.color_palette()[2])
x, y_mean = window_func(x, y, EPISODES_WINDOW, np.mean) #So returns average of last EPISODE_WINDOW episodes
plt.plot(x, y_mean, linewidth=0.8, c=sns.color_palette()[2], label='surrogate')
plt.title(title)
plt.xlabel(xaxis)
plt.ylabel("Episode Rewards")
plt.legend()
plt.tight_layout()
# Example usage in jupyter-notebook
# from baselines import log_viewer
# %matplotlib inline
# log_viewer.plot_results(["./log"], 10e6, log_viewer.X_TIMESTEPS, "Breakout")
# Here ./log is a directory containing the monitor.csv files
def main():
import argparse
import os
import glob
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--log_dir', help='Path of log directory', default='logs')
parser.add_argument('--num_timesteps', type=int, default=int(10e6))
    parser.add_argument('--xaxis', help='Variable on X-axis', default = X_TIMESTEPS)
parser.add_argument('--task_name', help='Name of atari game', default='PongNoFrameskip-v4')
parser.add_argument('--weight', type=float, help='Weight of noise', default=0.2)
parser.add_argument('--save_dir', help = 'Directory of output plots', default='results')
parser.add_argument('--noise_type', type=str, help='noise type (norm_one/norm_all/anti_iden)',
default='norm_one')
args = parser.parse_args()
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
dirs = glob.glob(os.path.join(args.log_dir, "openai*"))
    dirs = sorted(dirs)
cnt = 0
input_dirs = {}
for directory in dirs:
# print directory
with open(os.path.join(directory, "setting.txt"), "r") as f:
line = f.readlines()[-1].rstrip()
# print (line.split())
normal = line.split()[1][0:-1].split(',')[0]
weight = float(line.split()[3][0:-1].split(',')[0])
surrogate = line.split()[5][0:-1].split(',')[0]
noise_type = line.split()[7][0:-1].split(')')[0]
# print (normal, weight, surrogate, noise_type)
if weight != args.weight or noise_type != args.noise_type or normal == 'True':
continue
print (directory)
print (normal, weight, surrogate, noise_type)
if surrogate == 'False':
title = args.task_name + " (noisy-" + str(weight) + "-" + noise_type + ")"
input_dirs['noisy'] = directory
else:
title = args.task_name + " (surrogate-" + str(weight) + "-" + noise_type + ")"
input_dirs['surrogate'] = directory
# plot_results([directory], args.num_timesteps, args.xaxis, title)
# plt.savefig(os.path.join(args.save_dir, title + ".png"))
cnt += 1
    print(str(cnt) + " directories found")
title = args.task_name + "(" + args.noise_type + "-" + str(args.weight) + ")"
plot_results_compare(input_dirs, args.num_timesteps, args.xaxis, title)
plt.savefig(os.path.join(args.save_dir, title + ".png"))
plt.show()
if __name__ == '__main__':
main()
| 6,613 | 35.340659 | 115 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/run.py
|
import sys
import multiprocessing
import os
import os.path as osp
import gym
from collections import defaultdict
import tensorflow as tf
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
from baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_mujoco_env, make_atari_env
from baselines.common.tf_util import save_state, load_state, get_session
from baselines import bench, logger
from importlib import import_module
from baselines.common.vec_env.vec_normalize import VecNormalize
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from baselines.common import atari_wrappers, retro_wrappers
try:
from mpi4py import MPI
except ImportError:
MPI = None
_game_envs = defaultdict(set)
for env in gym.envs.registry.all():
# solve this with regexes
env_type = env._entry_point.split(':')[0].split('.')[-1]
_game_envs[env_type].add(env.id)
# reading benchmark names directly from retro requires
# importing retro here, and for some reason that crashes tensorflow
# in ubuntu
_game_envs['retro'] = set([
'BubbleBobble-Nes',
'SuperMarioBros-Nes',
'TwinBee3PokoPokoDaimaou-Nes',
'SpaceHarrier-Nes',
'SonicTheHedgehog-Genesis',
'Vectorman-Genesis',
'FinalFight-Snes',
'SpaceInvaders-Snes',
])
def train(args, extra_args, save_dir):
env_type, env_id = get_env_type(args.env)
total_timesteps = int(args.num_timesteps)
seed = args.seed
weight = args.weight
normal = args.normal
surrogate = args.surrogate
noise_type = args.noise_type
learn = get_learn_function(args.alg)
alg_kwargs = get_learn_function_defaults(args.alg, env_type)
alg_kwargs.update(extra_args)
env_name = args.env
env = build_env(args)
if args.network:
alg_kwargs['network'] = args.network
else:
if alg_kwargs.get('network') is None:
alg_kwargs['network'] = get_default_network(env_type)
reward_setting = '{\'normal\': ' + str(normal) + ', \'weight\': ' + str(weight) + \
', \'surrogate\': ' + str(surrogate) + ', \'noise_type\': ' + noise_type + '}'
setting = 'Training {} on {}:{} with arguments \n{} \n{}'.format(args.alg, env_type, env_id, alg_kwargs, reward_setting)
with open(os.path.join(save_dir, "setting.txt"), "w") as f:
print (setting)
f.write(setting)
f.write("\n")
model = learn(
env=env,
seed=seed,
total_timesteps=total_timesteps,
weight=weight,
normal=normal,
surrogate=surrogate,
noise_type=noise_type,
env_name=env_name,
**alg_kwargs
)
return model, env
def build_env(args, render=False):
ncpu = multiprocessing.cpu_count()
if sys.platform == 'darwin': ncpu //= 2
nenv = args.num_env or ncpu if not render else 1
alg = args.alg
rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
seed = args.seed
env_type, env_id = get_env_type(args.env)
if env_type == 'mujoco':
get_session(tf.ConfigProto(allow_soft_placement=True,
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1))
if args.num_env:
            # bind i through a default argument so each worker gets its own seed
            env = SubprocVecEnv([lambda i=i: make_mujoco_env(env_id, seed + i if seed is not None else None, args.reward_scale) for i in range(args.num_env)])
else:
env = DummyVecEnv([lambda: make_mujoco_env(env_id, seed, args.reward_scale)])
env = VecNormalize(env)
elif env_type == 'atari':
if alg == 'acer':
env = make_atari_env(env_id, nenv, seed)
elif alg == 'deepq':
env = atari_wrappers.make_atari(env_id)
env.seed(seed)
env = bench.Monitor(env, logger.get_dir())
env = atari_wrappers.wrap_deepmind(env, frame_stack=True, scale=True)
elif alg == 'trpo_mpi':
env = atari_wrappers.make_atari(env_id)
env.seed(seed)
env = bench.Monitor(env, logger.get_dir() and osp.join(logger.get_dir(), str(rank)))
env = atari_wrappers.wrap_deepmind(env)
# TODO check if the second seeding is necessary, and eventually remove
env.seed(seed)
else:
frame_stack_size = 4
env = VecFrameStack(make_atari_env(env_id, nenv, seed), frame_stack_size)
elif env_type == 'retro':
import retro
gamestate = args.gamestate or 'Level1-1'
env = retro_wrappers.make_retro(game=args.env, state=gamestate, max_episode_steps=10000, use_restricted_actions=retro.Actions.DISCRETE)
env.seed(args.seed)
env = bench.Monitor(env, logger.get_dir())
env = retro_wrappers.wrap_deepmind_retro(env)
elif env_type == 'classic':
def make_env():
e = gym.make(env_id)
e.seed(seed)
return e
env = DummyVecEnv([make_env])
return env
def get_env_type(env_id):
if env_id in _game_envs.keys():
env_type = env_id
env_id = [g for g in _game_envs[env_type]][0]
else:
env_type = None
for g, e in _game_envs.items():
if env_id in e:
env_type = g
break
    assert env_type is not None, 'env_id {} is not recognized in env types {}'.format(env_id, _game_envs.keys())
return env_type, env_id
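# Illustrative sketch (added; not part of the original script): get_env_type looks
# an id up in the registry-derived _game_envs mapping, so both a concrete id and a
# bare type name are accepted. Wrapped in a function so nothing runs at import time;
# the id below assumes the Atari environments are registered locally.
def _example_get_env_type():
    print(get_env_type('PongNoFrameskip-v4'))  # -> ('atari', 'PongNoFrameskip-v4')
    print(get_env_type('atari'))               # -> ('atari', <some registered atari id>)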
def get_default_network(env_type):
if env_type == 'mujoco' or env_type=='classic':
return 'mlp'
if env_type == 'atari':
return 'cnn'
raise ValueError('Unknown env_type {}'.format(env_type))
def get_alg_module(alg, submodule=None):
submodule = submodule or alg
try:
# first try to import the alg module from baselines
alg_module = import_module('.'.join(['baselines', alg, submodule]))
except ImportError:
# then from rl_algs
alg_module = import_module('.'.join(['rl_' + 'algs', alg, submodule]))
return alg_module
def get_learn_function(alg):
return get_alg_module(alg).learn
def get_learn_function_defaults(alg, env_type):
try:
alg_defaults = get_alg_module(alg, 'defaults')
kwargs = getattr(alg_defaults, env_type)()
except (ImportError, AttributeError):
kwargs = {}
return kwargs
def parse(v):
'''
    convert value of a command-line arg to a python object if possible, otherwise keep it as a string
'''
assert isinstance(v, str)
try:
return eval(v)
except (NameError, SyntaxError):
return v
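# Small sketch (added for illustration): parse() lets extra command-line overrides
# such as --lr=3e-4 or --layers=[64,64] arrive as Python objects, while plain words
# stay strings because eval raises NameError on them.
def _example_parse():
    assert parse('3e-4') == 3e-4          # evaluates to a float
    assert parse('[64, 64]') == [64, 64]  # evaluates to a list
    assert parse('cnn') == 'cnn'          # NameError inside eval -> kept as a string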
def main():
# configure logger, disable logging in child MPI processes (with rank > 0)
arg_parser = common_arg_parser()
args, unknown_args = arg_parser.parse_known_args()
extra_args = {k: parse(v) for k,v in parse_unknown_args(unknown_args).items()}
if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
rank = 0
logger.configure(env_name=args.env, normal=args.normal)
else:
logger.configure(format_strs = [], env_name=args.env, normal=args.normal)
rank = MPI.COMM_WORLD.Get_rank()
model, _ = train(args, extra_args, logger.get_dir())
if args.save_path is not None and rank == 0:
save_path = osp.expanduser(args.save_path)
model.save(save_path)
if args.play:
logger.log("Running trained model")
env = build_env(args, render=True)
obs = env.reset()
while True:
actions = model.step(obs)[0]
obs, _, done, _ = env.step(actions)
env.render()
if done:
obs = env.reset()
if __name__ == '__main__':
main()
| 7,705 | 30.325203 | 154 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/results_single.py
|
import argparse
import os
import glob
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
sns.set_color_codes()
from baselines.bench.monitor import load_results
matplotlib.rcParams.update({'font.size': 30})
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink',
'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'lightpurple', 'darkred', 'darkblue']
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
def ts2xy(ts, xaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
y = ts.r.values
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
y = ts.r.values
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
y = ts.r.values
else:
raise NotImplementedError
return x, y
def plot_results_single(ax, input_dir, num_timesteps, xaxis):
ts = load_results(input_dir)
ts = ts[ts.l.cumsum() <= num_timesteps]
xy_list = ts2xy(ts, xaxis)
x = xy_list[0]
y = xy_list[1]
ax.plot(x, y, alpha=0.4, linewidth=0.8, c=sns.color_palette()[0])
x, y_mean = window_func(x, y, EPISODES_WINDOW, np.mean) #So returns average of last EPISODE_WINDOW episodes
print ("avg_100: %.1f" % np.mean(y_mean[-100:]))
ax.plot(x, y_mean, linewidth=0.8, c=sns.color_palette()[0], label='normal')
# plt.set_title(title)
# ax.set_ylabel("Episode Rewards")
# ax.legend()
# plt.tight_layout()
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--log_dir', help='Path of log directory', default='logs')
parser.add_argument('--num_timesteps', type=int, default=int(5e7))
    parser.add_argument('--xaxis', help='Variable on X-axis', default = X_TIMESTEPS)
parser.add_argument('--task_name', help='Name of atari game', default='Pong')
parser.add_argument('--save_dir', help = 'Directory of output plots', default='../results')
parser.add_argument('--noise_type', type=str, help='noise type (norm_one/norm_all/anti_iden)',
default='anti_iden')
parser.add_argument('--plot_normal', type=str, help='whether to plot baseline with normal rewards')
args = parser.parse_args()
args.save_dir = os.path.join(args.save_dir, "paper")
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
dirs = glob.glob(os.path.join(args.log_dir, "openai*"))
dirs = sorted(dirs)
for input_dir in dirs:
with open(os.path.join(input_dir, "setting.txt"), "r") as f:
line = f.readlines()[-1].rstrip()
# normal = line.split()[1][0:-1].split(',')[0]
weight = float(line.split()[3][0:-1].split(',')[0])
surrogate = line.split()[5][0:-1].split(',')[0]
# noise_type = line.split()[7][0:-1].split(')')[0]
if weight in [0.1, 0.3, 0.7, 0.9] and surrogate == 'True':
print ("-" * 20)
print (line)
plot_results_single(plt, input_dir, args.num_timesteps, args.xaxis)
print ("-" * 20)
if __name__ == '__main__':
main()
| 3,709 | 35.372549 | 111 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/noisy_reward.py
|
import numpy as np
import collections
def is_invertible(a):
return a.shape[0] == a.shape[1] and np.linalg.matrix_rank(a) == a.shape[0]
def disarrange(a, axis=-1):
"""
Shuffle `a` in-place along the given axis.
Apply numpy.random.shuffle to the given axis of `a`.
Each one-dimensional slice is shuffled independently.
"""
b = a.swapaxes(axis, -1)
# Shuffle `b` in-place along the last axis. `b` is a view of `a`,
# so `a` is shuffled in place, too.
shp = b.shape[:-1]
for ndx in np.ndindex(shp):
np.random.shuffle(b[ndx])
return
def initialize_cmat(noise_type, M, weight):
cmat = None
flag = True
cnt = 0
while flag:
if noise_type == "norm_all":
init_norm = np.random.rand(M, M) # reward: 0 ~ -16
cmat = init_norm / init_norm.sum(axis=1, keepdims=1) * weight + \
(1 - weight) * np.identity(M)
elif noise_type == "norm_one":
i_mat = np.identity(M)
disarrange(i_mat)
print (i_mat)
cmat = i_mat * weight + (1 - weight) * np.identity(M)
elif noise_type == "anti_iden":
# if weight == 0.5: raise ValueError
cmat = np.identity(M)[::-1] * weight + \
(1 - weight) * np.identity(M)
if weight == 0.5: break
else:
# if weight == 0.5: raise ValueError
            i1_mat = np.zeros((M, M)); i1_mat[0:M//2, -1] = 1; i1_mat[M//2:, 0] = 1
i2_mat = np.zeros((M, M)); i2_mat[0:int(np.ceil(M/2.0)), -1] = 1; i2_mat[int(np.ceil(M/2.0)):, 0] = 1
i_mat = (i1_mat + i2_mat) / 2.0
cmat = i_mat * weight + (1 - weight) * np.identity(M)
if weight == 0.5: break
if is_invertible(cmat):
flag = False
cnt += 1
return cmat, cnt
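# Hedged example (added; not in the original module): every noise type builds a
# row-stochastic confusion matrix C with `weight` mass moved off the diagonal, and
# the loop retries until C is invertible so that the surrogate reward phi = C^-1 m
# is well defined. The weight value below is arbitrary.
def _example_initialize_cmat():
    cmat, tries = initialize_cmat("anti_iden", 3, 0.2)
    assert np.allclose(cmat.sum(axis=1), 1.0)  # each row is a probability distribution
    assert is_invertible(cmat)
    return cmat, tries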
class PongProcessor:
def __init__(self, weight=0.2, normal=False, surrogate=True, noise_type="norm_one"):
M = 3
self.weight = weight
self.normal = normal
self.surrogate = surrogate
self.cmat, _ = initialize_cmat(noise_type, M, self.weight)
# assert (is_invertible(self.cmat))
self.cummat = np.cumsum(self.cmat, axis=1)
print (self.cmat, self.cummat)
self.mmat = np.expand_dims(np.asarray([-1.0, 0.0, 1.0]), axis=1)
print (self.mmat)
self.phi = np.linalg.inv(self.cmat).dot(self.mmat)
print (self.phi)
# self.r_sum = 0
# self.r_counter = 0
def noisy_reward(self, reward):
prob_list = list(self.cummat[int(reward+1), :])
# print prob_list
n = np.random.random()
prob_list.append(n)
# print sorted(prob_list)
j = sorted(prob_list).index(n)
# print (n, j)
reward = j - 1.0
# print reward
return reward
def process_reward(self, reward):
# self.r_sum += reward
reward = int(np.ceil(reward))
if self.normal:
return reward
r = self.noisy_reward(reward)
if self.surrogate:
return self.phi[int(r + 1.0), 0]
return r
# return np.clip(reward, -1., 1.)
def process_step(self, rewards):
rewards_new = []
for reward in rewards:
reward = self.process_reward(reward)
rewards_new.append(reward)
return rewards_new
class BreakoutProcessor:
def __init__(self, weight=0.2, normal=False, surrogate=True, noise_type="anti_iden"):
M = 2
self.weight = weight
self.normal = normal
self.surrogate = surrogate
self.cmat, _ = initialize_cmat(noise_type, M, self.weight)
# assert (is_invertible(self.cmat))
self.cummat = np.cumsum(self.cmat, axis=1)
print (self.cmat, self.cummat)
self.mmat = np.expand_dims(np.asarray([0.0, 1.0]), axis=1)
print (self.mmat)
self.phi = np.linalg.inv(self.cmat).dot(self.mmat)
print (np.linalg.inv(self.cmat).dot(self.mmat))
# self.r_sum = 0
# self.r_counter = 0
def noisy_reward(self, reward):
prob_list = list(self.cummat[int(reward), :])
# print prob_list
n = np.random.random()
prob_list.append(n)
# print sorted(prob_list)
j = sorted(prob_list).index(n)
# print (n, j)
reward = j
# print reward
return reward
def process_reward(self, reward):
# self.r_sum += reward
reward = int(np.ceil(reward))
if self.normal:
return reward
r = self.noisy_reward(reward)
if self.surrogate:
return self.phi[int(r), 0]
return r
# return np.clip(reward, -1., 1.)
def process_step(self, rewards):
rewards_new = []
for reward in rewards:
reward = self.process_reward(reward)
rewards_new.append(reward)
return rewards_new
class BreakoutProcessor2:
"""
    Learning from a surrogate reward,
    following the paper "Learning from noisy labels".
"""
def __init__(self, weight=0.2, normal=True, surrogate=False, epsilon=1e-6):
assert (np.abs(weight - 0.5) > epsilon)
self.normal = normal
self.e_ = weight
self.e = weight
self.surrogate = surrogate
self.epsilon = 1e-6
self.r1 = 0
self.r2 = 1
def noisy_reward(self, reward):
n = np.random.random()
if np.abs(reward - self.r1) < self.epsilon:
if (n < self.e):
return self.r2
else:
if (n < self.e_):
return self.r1
return reward
def process_reward(self, reward):
r = self.noisy_reward(reward)
if not self.surrogate:
return r
if np.abs(r - self.r1) < self.epsilon:
r_surrogate = ((1 - self.e) * self.r1 - self.e_ * self.r2) / (1 - self.e_ - self.e)
else:
r_surrogate = ((1 - self.e_) * self.r2 - self.e * self.r1) / (1 - self.e_ - self.e)
return r_surrogate
def process_step(self, rewards):
if self.normal:
return rewards
rewards_new = []
for reward in rewards:
reward = self.process_reward(reward)
rewards_new.append(reward)
return rewards_new
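# Numerical sanity check (illustration only, not part of the original file): in
# BreakoutProcessor2 both flip probabilities equal `weight`, so the surrogate
# construction above is unbiased: E[surrogate reward | true reward] == true reward.
# A quick Monte Carlo check of that claim; the weight and sample size are arbitrary.
def _example_surrogate_unbiased(weight=0.2, n=200000):
    proc = BreakoutProcessor2(weight=weight, normal=False, surrogate=True)
    estimate = np.mean([proc.process_reward(1.0) for _ in range(n)])
    return estimate  # should be close to the true reward 1.0 up to Monte Carlo error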
class AtariProcessor:
def __init__(self, weight=0.1, normal=True, surrogate=False, epsilon=1e-6):
assert (np.abs(weight - 0.5) > epsilon)
self.normal = normal
self.surrogate = surrogate
self.r_sets = {}
self.e_ = weight
self.e = weight
self.r1 = 0
self.r2 = 1
self.counter = 0
self.C = np.identity(2)
self.epsilon = epsilon
if self.e > 0.5:
self.reverse = True
else: self.reverse = False
def noisy_reward(self, reward):
n = np.random.random()
if np.abs(reward - self.r1) < self.epsilon:
if (n < self.e_):
return self.r2
else:
if (n < self.e):
return self.r1
return reward
def noisy_rewards(self, rewards):
noisy_rewards = []
for r in rewards:
noisy_rewards.append(self.noisy_reward(r))
return noisy_rewards
def process_reward(self, reward):
if not self.surrogate:
return reward
self.est_e_ = self.C[0, 1]
self.est_e = self.C[1, 0]
if np.abs(reward - self.r1) < self.epsilon:
r_surrogate = ((1 - self.est_e) * self.r1 - self.est_e_ * self.r2) / (1 - self.est_e_ - self.est_e)
else:
r_surrogate = ((1 - self.est_e_) * self.r2 - self.est_e * self.r1) / (1 - self.est_e_ - self.est_e)
return r_surrogate
def process_rewards(self, rewards):
self.estimate_C()
rewards_new = []
for r in rewards:
rewards_new.append(self.process_reward(r))
return rewards_new
def estimate_C(self):
if self.counter >= 100 and self.counter % 50 == 0:
e_ = 0; e = 0
self.count1 = 0
self.count2 = 0
for k in self.r_sets.keys():
freq_count = collections.Counter(self.r_sets[k])
if self.reverse:
truth, count = freq_count.most_common()[-1]
else:
truth, count = freq_count.most_common()[0]
if truth == self.r1:
self.count1 += len(self.r_sets[k])
else:
self.count2 += len(self.r_sets[k])
# print (self.count1, self.count2)
for k in self.r_sets.keys():
freq_count = collections.Counter(self.r_sets[k])
if self.reverse:
truth, count = freq_count.most_common()[-1]
else:
truth, count = freq_count.most_common()[0]
prob_correct = float(count) / len(self.r_sets[k])
# print (prob_correct)
if truth == self.r1:
prob_k = float(len(self.r_sets[k])) / self.count1
e_ += prob_k * (1 - prob_correct)
else:
                    # The estimation of e is not accurate!
                    # In most cases the predicted true reward is not r0,
                    # so the number of effective samples is small.
prob_k = float(len(self.r_sets[k])) / self.count2
e += prob_k * (1 - prob_correct)
w = e_ if self.count1 >= self.count2 else e
# print (w, abs(w - self.e_))
self.C = np.array([[1-w, w], [w, 1-w]])
# if self.counter >= 10000:
# self.counter = 0
# self.r_sets = {}
# print self.C
def collect(self, rewards):
self.r_sets[self.counter % 1000] = rewards
self.counter += 1
def process_step(self, rewards):
# print (rewards.shape)
if self.normal:
return rewards
rewards = self.noisy_rewards(rewards)
self.collect(rewards)
rewards = self.process_rewards(rewards)
# print (rewards)
return rewards
| 10,368 | 29.952239 | 113 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/__init__.py
| 0 | 0 | 0 |
py
|
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/mpi_adam.py
|
from mpi4py import MPI
import baselines.common.tf_util as U
import tensorflow as tf
import numpy as np
class MpiAdam(object):
def __init__(self, var_list, *, beta1=0.9, beta2=0.999, epsilon=1e-08, scale_grad_by_procs=True, comm=None):
self.var_list = var_list
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.scale_grad_by_procs = scale_grad_by_procs
size = sum(U.numel(v) for v in var_list)
self.m = np.zeros(size, 'float32')
self.v = np.zeros(size, 'float32')
self.t = 0
self.setfromflat = U.SetFromFlat(var_list)
self.getflat = U.GetFlat(var_list)
self.comm = MPI.COMM_WORLD if comm is None else comm
def update(self, localg, stepsize):
if self.t % 100 == 0:
self.check_synced()
localg = localg.astype('float32')
globalg = np.zeros_like(localg)
self.comm.Allreduce(localg, globalg, op=MPI.SUM)
if self.scale_grad_by_procs:
globalg /= self.comm.Get_size()
self.t += 1
a = stepsize * np.sqrt(1 - self.beta2**self.t)/(1 - self.beta1**self.t)
self.m = self.beta1 * self.m + (1 - self.beta1) * globalg
self.v = self.beta2 * self.v + (1 - self.beta2) * (globalg * globalg)
step = (- a) * self.m / (np.sqrt(self.v) + self.epsilon)
self.setfromflat(self.getflat() + step)
def sync(self):
theta = self.getflat()
self.comm.Bcast(theta, root=0)
self.setfromflat(theta)
def check_synced(self):
if self.comm.Get_rank() == 0: # this is root
theta = self.getflat()
self.comm.Bcast(theta, root=0)
else:
thetalocal = self.getflat()
thetaroot = np.empty_like(thetalocal)
self.comm.Bcast(thetaroot, root=0)
assert (thetaroot == thetalocal).all(), (thetaroot, thetalocal)
@U.in_session
def test_MpiAdam():
np.random.seed(0)
tf.set_random_seed(0)
a = tf.Variable(np.random.randn(3).astype('float32'))
b = tf.Variable(np.random.randn(2,5).astype('float32'))
loss = tf.reduce_sum(tf.square(a)) + tf.reduce_sum(tf.sin(b))
stepsize = 1e-2
update_op = tf.train.AdamOptimizer(stepsize).minimize(loss)
do_update = U.function([], loss, updates=[update_op])
tf.get_default_session().run(tf.global_variables_initializer())
for i in range(10):
print(i,do_update())
tf.set_random_seed(0)
tf.get_default_session().run(tf.global_variables_initializer())
var_list = [a,b]
lossandgrad = U.function([], [loss, U.flatgrad(loss, var_list)], updates=[update_op])
adam = MpiAdam(var_list)
for i in range(10):
l,g = lossandgrad()
adam.update(g, stepsize)
print(i,l)
| 2,786 | 34.278481 | 112 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/cg.py
|
import numpy as np
def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10):
"""
Demmel p 312
"""
p = b.copy()
r = b.copy()
x = np.zeros_like(b)
rdotr = r.dot(r)
fmtstr = "%10i %10.3g %10.3g"
titlestr = "%10s %10s %10s"
if verbose: print(titlestr % ("iter", "residual norm", "soln norm"))
for i in range(cg_iters):
if callback is not None:
callback(x)
if verbose: print(fmtstr % (i, rdotr, np.linalg.norm(x)))
z = f_Ax(p)
v = rdotr / p.dot(z)
x += v*p
r -= v*z
newrdotr = r.dot(r)
mu = newrdotr/rdotr
p = r + mu*p
rdotr = newrdotr
if rdotr < residual_tol:
break
if callback is not None:
callback(x)
if verbose: print(fmtstr % (i+1, rdotr, np.linalg.norm(x))) # pylint: disable=W0631
return x
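# Minimal usage sketch (added; not in the original file): cg only needs a
# matrix-vector product, so a small dense SPD system is enough for a quick check.
def _example_cg():
    A = np.array([[4.0, 1.0], [1.0, 3.0]])  # symmetric positive definite
    b = np.array([1.0, 2.0])
    x = cg(lambda p: A.dot(p), b, cg_iters=10)
    assert np.allclose(A.dot(x), b, atol=1e-6)
    return x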
| 896 | 25.382353 | 88 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/runners.py
|
import numpy as np
from abc import ABC, abstractmethod
class AbstractEnvRunner(ABC):
def __init__(self, *, env, model, nsteps):
self.env = env
self.model = model
self.nenv = nenv = env.num_envs if hasattr(env, 'num_envs') else 1
self.batch_ob_shape = (nenv*nsteps,) + env.observation_space.shape
self.obs = np.zeros((nenv,) + env.observation_space.shape, dtype=env.observation_space.dtype.name)
self.obs[:] = env.reset()
self.nsteps = nsteps
self.states = model.initial_state
self.dones = [False for _ in range(nenv)]
@abstractmethod
def run(self):
raise NotImplementedError
| 670 | 32.55 | 106 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/distributions.py
|
import tensorflow as tf
import numpy as np
import baselines.common.tf_util as U
from baselines.a2c.utils import fc
from tensorflow.python.ops import math_ops
class Pd(object):
"""
A particular probability distribution
"""
def flatparam(self):
raise NotImplementedError
def mode(self):
raise NotImplementedError
def neglogp(self, x):
# Usually it's easier to define the negative logprob
raise NotImplementedError
def kl(self, other):
raise NotImplementedError
def entropy(self):
raise NotImplementedError
def sample(self):
raise NotImplementedError
def logp(self, x):
return - self.neglogp(x)
class PdType(object):
"""
Parametrized family of probability distributions
"""
def pdclass(self):
raise NotImplementedError
def pdfromflat(self, flat):
return self.pdclass()(flat)
def pdfromlatent(self, latent_vector):
raise NotImplementedError
def param_shape(self):
raise NotImplementedError
def sample_shape(self):
raise NotImplementedError
def sample_dtype(self):
raise NotImplementedError
def param_placeholder(self, prepend_shape, name=None):
return tf.placeholder(dtype=tf.float32, shape=prepend_shape+self.param_shape(), name=name)
def sample_placeholder(self, prepend_shape, name=None):
return tf.placeholder(dtype=self.sample_dtype(), shape=prepend_shape+self.sample_shape(), name=name)
class CategoricalPdType(PdType):
def __init__(self, ncat):
self.ncat = ncat
def pdclass(self):
return CategoricalPd
def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0):
pdparam = fc(latent_vector, 'pi', self.ncat, init_scale=init_scale, init_bias=init_bias)
return self.pdfromflat(pdparam), pdparam
def param_shape(self):
return [self.ncat]
def sample_shape(self):
return []
def sample_dtype(self):
return tf.int32
class MultiCategoricalPdType(PdType):
def __init__(self, nvec):
self.ncats = nvec
def pdclass(self):
return MultiCategoricalPd
def pdfromflat(self, flat):
return MultiCategoricalPd(self.ncats, flat)
def param_shape(self):
return [sum(self.ncats)]
def sample_shape(self):
return [len(self.ncats)]
def sample_dtype(self):
return tf.int32
class DiagGaussianPdType(PdType):
def __init__(self, size):
self.size = size
def pdclass(self):
return DiagGaussianPd
def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0):
mean = fc(latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias)
logstd = tf.get_variable(name='pi/logstd', shape=[1, self.size], initializer=tf.zeros_initializer())
pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)
return self.pdfromflat(pdparam), mean
def param_shape(self):
return [2*self.size]
def sample_shape(self):
return [self.size]
def sample_dtype(self):
return tf.float32
class BernoulliPdType(PdType):
def __init__(self, size):
self.size = size
def pdclass(self):
return BernoulliPd
def param_shape(self):
return [self.size]
def sample_shape(self):
return [self.size]
def sample_dtype(self):
return tf.int32
# WRONG SECOND DERIVATIVES
# class CategoricalPd(Pd):
# def __init__(self, logits):
# self.logits = logits
# self.ps = tf.nn.softmax(logits)
# @classmethod
# def fromflat(cls, flat):
# return cls(flat)
# def flatparam(self):
# return self.logits
# def mode(self):
# return U.argmax(self.logits, axis=-1)
# def logp(self, x):
# return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x)
# def kl(self, other):
# return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \
# - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
# def entropy(self):
# return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
# def sample(self):
# u = tf.random_uniform(tf.shape(self.logits))
# return U.argmax(self.logits - tf.log(-tf.log(u)), axis=-1)
class CategoricalPd(Pd):
def __init__(self, logits):
self.logits = logits
def flatparam(self):
return self.logits
def mode(self):
return tf.argmax(self.logits, axis=-1)
def neglogp(self, x):
# return tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=x)
# Note: we can't use sparse_softmax_cross_entropy_with_logits because
# the implementation does not allow second-order derivatives...
one_hot_actions = tf.one_hot(x, self.logits.get_shape().as_list()[-1])
return tf.nn.softmax_cross_entropy_with_logits(
# return tf.nn.softmax_cross_entropy_with_logits_v2(
logits=self.logits,
labels=one_hot_actions)
def kl(self, other):
a0 = self.logits - tf.reduce_max(self.logits, axis=-1, keep_dims=True)
a1 = other.logits - tf.reduce_max(other.logits, axis=-1, keep_dims=True)
ea0 = tf.exp(a0)
ea1 = tf.exp(a1)
z0 = tf.reduce_sum(ea0, axis=-1, keep_dims=True)
z1 = tf.reduce_sum(ea1, axis=-1, keep_dims=True)
p0 = ea0 / z0
return tf.reduce_sum(p0 * (a0 - tf.log(z0) - a1 + tf.log(z1)), axis=-1)
def entropy(self):
a0 = self.logits - tf.reduce_max(self.logits, axis=-1, keep_dims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, axis=-1, keep_dims=True)
p0 = ea0 / z0
return tf.reduce_sum(p0 * (tf.log(z0) - a0), axis=-1)
def sample(self):
u = tf.random_uniform(tf.shape(self.logits), dtype=self.logits.dtype)
return tf.argmax(self.logits - tf.log(-tf.log(u)), axis=-1)
@classmethod
def fromflat(cls, flat):
return cls(flat)
class MultiCategoricalPd(Pd):
def __init__(self, nvec, flat):
self.flat = flat
self.categoricals = list(map(CategoricalPd, tf.split(flat, nvec, axis=-1)))
def flatparam(self):
return self.flat
def mode(self):
return tf.cast(tf.stack([p.mode() for p in self.categoricals], axis=-1), tf.int32)
def neglogp(self, x):
return tf.add_n([p.neglogp(px) for p, px in zip(self.categoricals, tf.unstack(x, axis=-1))])
def kl(self, other):
return tf.add_n([p.kl(q) for p, q in zip(self.categoricals, other.categoricals)])
def entropy(self):
return tf.add_n([p.entropy() for p in self.categoricals])
def sample(self):
return tf.cast(tf.stack([p.sample() for p in self.categoricals], axis=-1), tf.int32)
@classmethod
def fromflat(cls, flat):
raise NotImplementedError
class DiagGaussianPd(Pd):
def __init__(self, flat):
self.flat = flat
mean, logstd = tf.split(axis=len(flat.shape)-1, num_or_size_splits=2, value=flat)
self.mean = mean
self.logstd = logstd
self.std = tf.exp(logstd)
def flatparam(self):
return self.flat
def mode(self):
return self.mean
def neglogp(self, x):
return 0.5 * tf.reduce_sum(tf.square((x - self.mean) / self.std), axis=-1) \
+ 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \
+ tf.reduce_sum(self.logstd, axis=-1)
def kl(self, other):
assert isinstance(other, DiagGaussianPd)
return tf.reduce_sum(other.logstd - self.logstd + (tf.square(self.std) + tf.square(self.mean - other.mean)) / (2.0 * tf.square(other.std)) - 0.5, axis=-1)
def entropy(self):
return tf.reduce_sum(self.logstd + .5 * np.log(2.0 * np.pi * np.e), axis=-1)
def sample(self):
return self.mean + self.std * tf.random_normal(tf.shape(self.mean))
@classmethod
def fromflat(cls, flat):
return cls(flat)
class BernoulliPd(Pd):
def __init__(self, logits):
self.logits = logits
self.ps = tf.sigmoid(logits)
def flatparam(self):
return self.logits
def mode(self):
return tf.round(self.ps)
def neglogp(self, x):
return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=tf.to_float(x)), axis=-1)
def kl(self, other):
return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=other.logits, labels=self.ps), axis=-1) - tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.ps), axis=-1)
def entropy(self):
return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.ps), axis=-1)
def sample(self):
u = tf.random_uniform(tf.shape(self.ps))
return tf.to_float(math_ops.less(u, self.ps))
@classmethod
def fromflat(cls, flat):
return cls(flat)
def make_pdtype(ac_space):
from gym import spaces
if isinstance(ac_space, spaces.Box):
assert len(ac_space.shape) == 1
return DiagGaussianPdType(ac_space.shape[0])
elif isinstance(ac_space, spaces.Discrete):
return CategoricalPdType(ac_space.n)
elif isinstance(ac_space, spaces.MultiDiscrete):
return MultiCategoricalPdType(ac_space.nvec)
elif isinstance(ac_space, spaces.MultiBinary):
return BernoulliPdType(ac_space.n)
else:
raise NotImplementedError
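# Small sketch (added for illustration, not in the original module): make_pdtype
# maps a gym action space to the matching distribution family used by the policies.
def _example_make_pdtype():
    from gym import spaces
    assert isinstance(make_pdtype(spaces.Discrete(6)), CategoricalPdType)
    box = spaces.Box(low=-1.0, high=1.0, shape=(3,), dtype=np.float32)
    assert isinstance(make_pdtype(box), DiagGaussianPdType)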
def shape_el(v, i):
maybe = v.get_shape()[i]
if maybe is not None:
return maybe
else:
return tf.shape(v)[i]
@U.in_session
def test_probtypes():
np.random.seed(0)
pdparam_diag_gauss = np.array([-.2, .3, .4, -.5, .1, -.5, .1, 0.8])
diag_gauss = DiagGaussianPdType(pdparam_diag_gauss.size // 2) #pylint: disable=E1101
validate_probtype(diag_gauss, pdparam_diag_gauss)
pdparam_categorical = np.array([-.2, .3, .5])
categorical = CategoricalPdType(pdparam_categorical.size) #pylint: disable=E1101
validate_probtype(categorical, pdparam_categorical)
nvec = [1,2,3]
pdparam_multicategorical = np.array([-.2, .3, .5, .1, 1, -.1])
multicategorical = MultiCategoricalPdType(nvec) #pylint: disable=E1101
validate_probtype(multicategorical, pdparam_multicategorical)
pdparam_bernoulli = np.array([-.2, .3, .5])
bernoulli = BernoulliPdType(pdparam_bernoulli.size) #pylint: disable=E1101
validate_probtype(bernoulli, pdparam_bernoulli)
def validate_probtype(probtype, pdparam):
N = 100000
# Check to see if mean negative log likelihood == differential entropy
Mval = np.repeat(pdparam[None, :], N, axis=0)
M = probtype.param_placeholder([N])
X = probtype.sample_placeholder([N])
pd = probtype.pdfromflat(M)
calcloglik = U.function([X, M], pd.logp(X))
calcent = U.function([M], pd.entropy())
Xval = tf.get_default_session().run(pd.sample(), feed_dict={M:Mval})
logliks = calcloglik(Xval, Mval)
entval_ll = - logliks.mean() #pylint: disable=E1101
entval_ll_stderr = logliks.std() / np.sqrt(N) #pylint: disable=E1101
entval = calcent(Mval).mean() #pylint: disable=E1101
assert np.abs(entval - entval_ll) < 3 * entval_ll_stderr # within 3 sigmas
# Check to see if kldiv[p,q] = - ent[p] - E_p[log q]
M2 = probtype.param_placeholder([N])
pd2 = probtype.pdfromflat(M2)
q = pdparam + np.random.randn(pdparam.size) * 0.1
Mval2 = np.repeat(q[None, :], N, axis=0)
calckl = U.function([M, M2], pd.kl(pd2))
klval = calckl(Mval, Mval2).mean() #pylint: disable=E1101
logliks = calcloglik(Xval, Mval2)
klval_ll = - entval - logliks.mean() #pylint: disable=E1101
klval_ll_stderr = logliks.std() / np.sqrt(N) #pylint: disable=E1101
assert np.abs(klval - klval_ll) < 3 * klval_ll_stderr # within 3 sigmas
print('ok on', probtype, pdparam)
| 11,896 | 37.254019 | 217 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/mpi_util.py
|
from collections import defaultdict
from mpi4py import MPI
import os, numpy as np
import platform
import shutil
import subprocess
def sync_from_root(sess, variables, comm=None):
"""
Send the root node's parameters to every worker.
Arguments:
sess: the TensorFlow session.
variables: all parameter variables including optimizer's
"""
if comm is None: comm = MPI.COMM_WORLD
rank = comm.Get_rank()
for var in variables:
if rank == 0:
comm.Bcast(sess.run(var))
else:
import tensorflow as tf
returned_var = np.empty(var.shape, dtype='float32')
comm.Bcast(returned_var)
sess.run(tf.assign(var, returned_var))
def gpu_count():
"""
Count the GPUs on this machine.
"""
if shutil.which('nvidia-smi') is None:
return 0
output = subprocess.check_output(['nvidia-smi', '--query-gpu=gpu_name', '--format=csv'])
return max(0, len(output.split(b'\n')) - 2)
def setup_mpi_gpus():
"""
Set CUDA_VISIBLE_DEVICES using MPI.
"""
num_gpus = gpu_count()
if num_gpus == 0:
return
local_rank, _ = get_local_rank_size(MPI.COMM_WORLD)
os.environ['CUDA_VISIBLE_DEVICES'] = str(local_rank % num_gpus)
def get_local_rank_size(comm):
"""
Returns the rank of each process on its machine
The processes on a given machine will be assigned ranks
0, 1, 2, ..., N-1,
where N is the number of processes on this machine.
Useful if you want to assign one gpu per machine
"""
this_node = platform.node()
ranks_nodes = comm.allgather((comm.Get_rank(), this_node))
node2rankssofar = defaultdict(int)
local_rank = None
for (rank, node) in ranks_nodes:
if rank == comm.Get_rank():
local_rank = node2rankssofar[node]
node2rankssofar[node] += 1
assert local_rank is not None
return local_rank, node2rankssofar[this_node]
def share_file(comm, path):
"""
Copies the file from rank 0 to all other ranks
Puts it in the same place on all machines
"""
localrank, _ = get_local_rank_size(comm)
if comm.Get_rank() == 0:
with open(path, 'rb') as fh:
data = fh.read()
comm.bcast(data)
else:
data = comm.bcast(None)
if localrank == 0:
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'wb') as fh:
fh.write(data)
comm.Barrier()
def dict_gather(comm, d, op='mean', assert_all_have_data=True):
if comm is None: return d
alldicts = comm.allgather(d)
size = comm.size
k2li = defaultdict(list)
for d in alldicts:
for (k,v) in d.items():
k2li[k].append(v)
result = {}
for (k,li) in k2li.items():
if assert_all_have_data:
assert len(li)==size, "only %i out of %i MPI workers have sent '%s'" % (len(li), size, k)
if op=='mean':
result[k] = np.mean(li, axis=0)
elif op=='sum':
result[k] = np.sum(li, axis=0)
else:
assert 0, op
return result
| 3,116 | 29.558824 | 101 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/schedules.py
|
"""This file is used for specifying various schedules that evolve over
time throughout the execution of the algorithm, such as:
- learning rate for the optimizer
- exploration epsilon for the epsilon greedy exploration strategy
- beta parameter for beta parameter in prioritized replay
Each schedule has a function `value(t)` which returns the current value
of the parameter given the timestep t of the optimization procedure.
"""
class Schedule(object):
def value(self, t):
"""Value of the schedule at time t"""
raise NotImplementedError()
class ConstantSchedule(object):
def __init__(self, value):
"""Value remains constant over time.
Parameters
----------
value: float
Constant value of the schedule
"""
self._v = value
def value(self, t):
"""See Schedule.value"""
return self._v
def linear_interpolation(l, r, alpha):
return l + alpha * (r - l)
class PiecewiseSchedule(object):
def __init__(self, endpoints, interpolation=linear_interpolation, outside_value=None):
"""Piecewise schedule.
endpoints: [(int, int)]
            list of pairs `(time, value)` meaning that schedule should output
`value` when `t==time`. All the values for time must be sorted in
an increasing order. When t is between two times, e.g. `(time_a, value_a)`
and `(time_b, value_b)`, such that `time_a <= t < time_b` then value outputs
`interpolation(value_a, value_b, alpha)` where alpha is a fraction of
time passed between `time_a` and `time_b` for time `t`.
interpolation: lambda float, float, float: float
a function that takes value to the left and to the right of t according
to the `endpoints`. Alpha is the fraction of distance from left endpoint to
right endpoint that t has covered. See linear_interpolation for example.
outside_value: float
            if the value is requested outside of all the intervals specified in
`endpoints` this value is returned. If None then AssertionError is
raised when outside value is requested.
"""
idxes = [e[0] for e in endpoints]
assert idxes == sorted(idxes)
self._interpolation = interpolation
self._outside_value = outside_value
self._endpoints = endpoints
def value(self, t):
"""See Schedule.value"""
for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]):
if l_t <= t and t < r_t:
alpha = float(t - l_t) / (r_t - l_t)
return self._interpolation(l, r, alpha)
# t does not belong to any of the pieces, so doom.
assert self._outside_value is not None
return self._outside_value
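# Short usage sketch (added for illustration, not part of the original module): a
# piecewise-linear epsilon schedule that decays from 1.0 to 0.1 over the first 100
# steps and stays at 0.1 afterwards.
def _example_piecewise_schedule():
    eps = PiecewiseSchedule([(0, 1.0), (100, 0.1)], outside_value=0.1)
    assert abs(eps.value(0) - 1.0) < 1e-8
    assert abs(eps.value(50) - 0.55) < 1e-8   # halfway between 1.0 and 0.1
    assert abs(eps.value(1000) - 0.1) < 1e-8  # past the last endpoint -> outside_value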
class LinearSchedule(object):
def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
"""Linear interpolation between initial_p and final_p over
schedule_timesteps. After this many timesteps pass final_p is
returned.
Parameters
----------
schedule_timesteps: int
Number of timesteps for which to linearly anneal initial_p
to final_p
initial_p: float
initial output value
final_p: float
final output value
"""
self.schedule_timesteps = schedule_timesteps
self.final_p = final_p
self.initial_p = initial_p
def value(self, t):
"""See Schedule.value"""
fraction = min(float(t) / self.schedule_timesteps, 1.0)
return self.initial_p + fraction * (self.final_p - self.initial_p)
| 3,702 | 36.03 | 90 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/atari_wrappers.py
|
import numpy as np
import os
os.environ.setdefault('PATH', '')
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames,
            # so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = 84
self.height = 84
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 1), dtype=np.uint8)
def observation(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
return frame[:, :, None]
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=2)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def make_atari(env_id):
env = gym.make(env_id)
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
return env
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
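# Hedged usage sketch (added; not part of the original module): the standard
# DQN-style preprocessing pipeline. The environment id is an assumption about
# what is installed locally.
def _example_make_wrapped_env():
    env = make_atari('PongNoFrameskip-v4')      # NoopResetEnv + MaxAndSkipEnv
    env = wrap_deepmind(env, frame_stack=True)  # life episodes, warp, clip, stack
    obs = env.reset()
    assert np.array(obs).shape == (84, 84, 4)   # four stacked 84x84 grayscale frames
    return env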
| 8,216 | 33.380753 | 131 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/mpi_running_mean_std.py
|
from mpi4py import MPI
import tensorflow as tf, baselines.common.tf_util as U, numpy as np
class RunningMeanStd(object):
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
def __init__(self, epsilon=1e-2, shape=()):
self._sum = tf.get_variable(
dtype=tf.float64,
shape=shape,
initializer=tf.constant_initializer(0.0),
name="runningsum", trainable=False)
self._sumsq = tf.get_variable(
dtype=tf.float64,
shape=shape,
initializer=tf.constant_initializer(epsilon),
name="runningsumsq", trainable=False)
self._count = tf.get_variable(
dtype=tf.float64,
shape=(),
initializer=tf.constant_initializer(epsilon),
name="count", trainable=False)
self.shape = shape
self.mean = tf.to_float(self._sum / self._count)
self.std = tf.sqrt( tf.maximum( tf.to_float(self._sumsq / self._count) - tf.square(self.mean) , 1e-2 ))
newsum = tf.placeholder(shape=self.shape, dtype=tf.float64, name='sum')
newsumsq = tf.placeholder(shape=self.shape, dtype=tf.float64, name='var')
newcount = tf.placeholder(shape=[], dtype=tf.float64, name='count')
self.incfiltparams = U.function([newsum, newsumsq, newcount], [],
updates=[tf.assign_add(self._sum, newsum),
tf.assign_add(self._sumsq, newsumsq),
tf.assign_add(self._count, newcount)])
def update(self, x):
x = x.astype('float64')
n = int(np.prod(self.shape))
totalvec = np.zeros(n*2+1, 'float64')
addvec = np.concatenate([x.sum(axis=0).ravel(), np.square(x).sum(axis=0).ravel(), np.array([len(x)],dtype='float64')])
MPI.COMM_WORLD.Allreduce(addvec, totalvec, op=MPI.SUM)
self.incfiltparams(totalvec[0:n].reshape(self.shape), totalvec[n:2*n].reshape(self.shape), totalvec[2*n])
@U.in_session
def test_runningmeanstd():
for (x1, x2, x3) in [
(np.random.randn(3), np.random.randn(4), np.random.randn(5)),
(np.random.randn(3,2), np.random.randn(4,2), np.random.randn(5,2)),
]:
rms = RunningMeanStd(epsilon=0.0, shape=x1.shape[1:])
U.initialize()
x = np.concatenate([x1, x2, x3], axis=0)
ms1 = [x.mean(axis=0), x.std(axis=0)]
rms.update(x1)
rms.update(x2)
rms.update(x3)
ms2 = [rms.mean.eval(), rms.std.eval()]
assert np.allclose(ms1, ms2)
@U.in_session
def test_dist():
np.random.seed(0)
p1,p2,p3=(np.random.randn(3,1), np.random.randn(4,1), np.random.randn(5,1))
q1,q2,q3=(np.random.randn(6,1), np.random.randn(7,1), np.random.randn(8,1))
# p1,p2,p3=(np.random.randn(3), np.random.randn(4), np.random.randn(5))
# q1,q2,q3=(np.random.randn(6), np.random.randn(7), np.random.randn(8))
comm = MPI.COMM_WORLD
assert comm.Get_size()==2
if comm.Get_rank()==0:
x1,x2,x3 = p1,p2,p3
elif comm.Get_rank()==1:
x1,x2,x3 = q1,q2,q3
else:
assert False
rms = RunningMeanStd(epsilon=0.0, shape=(1,))
U.initialize()
rms.update(x1)
rms.update(x2)
rms.update(x3)
bigvec = np.concatenate([p1,p2,p3,q1,q2,q3])
def checkallclose(x,y):
print(x,y)
return np.allclose(x,y)
assert checkallclose(
bigvec.mean(axis=0),
rms.mean.eval(),
)
assert checkallclose(
bigvec.std(axis=0),
rms.std.eval(),
)
if __name__ == "__main__":
# Run with mpirun -np 2 python <filename>
test_dist()
| 3,629 | 32.611111 | 126 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/misc_util.py
|
import gym
import numpy as np
import os
import pickle
import random
import tempfile
import zipfile
def zipsame(*seqs):
L = len(seqs[0])
assert all(len(seq) == L for seq in seqs[1:])
return zip(*seqs)
def unpack(seq, sizes):
"""
Unpack 'seq' into a sequence of lists, with lengths specified by 'sizes'.
None = just one bare element, not a list
Example:
unpack([1,2,3,4,5,6], [3,None,2]) -> ([1,2,3], 4, [5,6])
"""
seq = list(seq)
it = iter(seq)
assert sum(1 if s is None else s for s in sizes) == len(seq), "Trying to unpack %s into %s" % (seq, sizes)
for size in sizes:
if size is None:
yield it.__next__()
else:
li = []
for _ in range(size):
li.append(it.__next__())
yield li
class EzPickle(object):
"""Objects that are pickled and unpickled via their constructor
arguments.
Example usage:
class Dog(Animal, EzPickle):
def __init__(self, furcolor, tailkind="bushy"):
Animal.__init__()
EzPickle.__init__(furcolor, tailkind)
...
When this object is unpickled, a new Dog will be constructed by passing the provided
furcolor and tailkind into the constructor. However, philosophers are still not sure
whether it is still the same dog.
This is generally needed only for environments which wrap C/C++ code, such as MuJoCo
and Atari.
"""
def __init__(self, *args, **kwargs):
self._ezpickle_args = args
self._ezpickle_kwargs = kwargs
def __getstate__(self):
return {"_ezpickle_args": self._ezpickle_args, "_ezpickle_kwargs": self._ezpickle_kwargs}
def __setstate__(self, d):
out = type(self)(*d["_ezpickle_args"], **d["_ezpickle_kwargs"])
self.__dict__.update(out.__dict__)
def set_global_seeds(i):
try:
        from mpi4py import MPI  # a bare 'import MPI' always raised ImportError, forcing rank 0
rank = MPI.COMM_WORLD.Get_rank()
except ImportError:
rank = 0
myseed = i + 1000 * rank if i is not None else None
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(myseed)
np.random.seed(myseed)
random.seed(myseed)
def pretty_eta(seconds_left):
"""Print the number of seconds in human readable format.
Examples:
2 days
2 hours and 37 minutes
less than a minute
    Parameters
    ----------
seconds_left: int
Number of seconds to be converted to the ETA
Returns
-------
eta: str
String representing the pretty ETA.
"""
minutes_left = seconds_left // 60
seconds_left %= 60
hours_left = minutes_left // 60
minutes_left %= 60
days_left = hours_left // 24
hours_left %= 24
def helper(cnt, name):
return "{} {}{}".format(str(cnt), name, ('s' if cnt > 1 else ''))
if days_left > 0:
msg = helper(days_left, 'day')
if hours_left > 0:
msg += ' and ' + helper(hours_left, 'hour')
return msg
if hours_left > 0:
msg = helper(hours_left, 'hour')
if minutes_left > 0:
msg += ' and ' + helper(minutes_left, 'minute')
return msg
if minutes_left > 0:
return helper(minutes_left, 'minute')
return 'less than a minute'
class RunningAvg(object):
def __init__(self, gamma, init_value=None):
"""Keep a running estimate of a quantity. This is a bit like mean
but more sensitive to recent changes.
Parameters
----------
gamma: float
Must be between 0 and 1, where 0 is the most sensitive to recent
changes.
init_value: float or None
Initial value of the estimate. If None, it will be set on the first update.
"""
self._value = init_value
self._gamma = gamma
def update(self, new_val):
"""Update the estimate.
Parameters
----------
new_val: float
new observed value of the estimated quantity.
"""
if self._value is None:
self._value = new_val
else:
self._value = self._gamma * self._value + (1.0 - self._gamma) * new_val
def __float__(self):
"""Get the current estimate"""
return self._value
def boolean_flag(parser, name, default=False, help=None):
"""Add a boolean flag to argparse parser.
Parameters
----------
parser: argparse.Parser
parser to add the flag to
name: str
--<name> will enable the flag, while --no-<name> will disable it
default: bool or None
default value of the flag
help: str
help string for the flag
"""
dest = name.replace('-', '_')
parser.add_argument("--" + name, action="store_true", default=default, dest=dest, help=help)
parser.add_argument("--no-" + name, action="store_false", dest=dest)
def get_wrapper_by_name(env, classname):
"""Given an a gym environment possibly wrapped multiple times, returns a wrapper
of class named classname or raises ValueError if no such wrapper was applied
Parameters
----------
env: gym.Env or gym.Wrapper
gym environment
classname: str
name of the wrapper
Returns
-------
wrapper: gym.Wrapper
wrapper named classname
"""
currentenv = env
while True:
if classname == currentenv.class_name():
return currentenv
elif isinstance(currentenv, gym.Wrapper):
currentenv = currentenv.env
else:
raise ValueError("Couldn't find wrapper named %s" % classname)
def relatively_safe_pickle_dump(obj, path, compression=False):
"""This is just like regular pickle dump, except from the fact that failure cases are
different:
- It's never possible that we end up with a pickle in corrupted state.
- If a there was a different file at the path, that file will remain unchanged in the
even of failure (provided that filesystem rename is atomic).
- it is sometimes possible that we end up with useless temp file which needs to be
deleted manually (it will be removed automatically on the next function call)
The indended use case is periodic checkpoints of experiment state, such that we never
corrupt previous checkpoints if the current one fails.
Parameters
----------
obj: object
object to pickle
path: str
path to the output file
compression: bool
if true pickle will be compressed
"""
temp_storage = path + ".relatively_safe"
if compression:
# Using gzip here would be simpler, but the size is limited to 2GB
with tempfile.NamedTemporaryFile() as uncompressed_file:
pickle.dump(obj, uncompressed_file)
uncompressed_file.file.flush()
with zipfile.ZipFile(temp_storage, "w", compression=zipfile.ZIP_DEFLATED) as myzip:
myzip.write(uncompressed_file.name, "data")
else:
with open(temp_storage, "wb") as f:
pickle.dump(obj, f)
os.rename(temp_storage, path)
def pickle_load(path, compression=False):
"""Unpickle a possible compressed pickle.
Parameters
----------
path: str
path to the output file
compression: bool
if true assumes that pickle was compressed when created and attempts decompression.
Returns
-------
obj: object
the unpickled object
"""
if compression:
with zipfile.ZipFile(path, "r", compression=zipfile.ZIP_DEFLATED) as myzip:
with myzip.open("data") as f:
return pickle.load(f)
else:
with open(path, "rb") as f:
return pickle.load(f)
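# --- Hedged usage sketch (not part of the original file): round-trips an object
# through relatively_safe_pickle_dump/pickle_load. The checkpoint file name is
# hypothetical; compression is left off for portability.
def _example_safe_pickle_roundtrip():
    with tempfile.TemporaryDirectory() as tmpdir:
        path = os.path.join(tmpdir, "checkpoint.pkl")
        relatively_safe_pickle_dump({"step": 42}, path)
        assert pickle_load(path) == {"step": 42}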
| 7,776 | 28.236842 | 110 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/mpi_fork.py
|
import os, subprocess, sys
def mpi_fork(n, bind_to_core=False):
"""Re-launches the current script with workers
Returns "parent" for original parent, "child" for MPI children
"""
if n<=1:
return "child"
if os.getenv("IN_MPI") is None:
env = os.environ.copy()
env.update(
MKL_NUM_THREADS="1",
OMP_NUM_THREADS="1",
IN_MPI="1"
)
args = ["mpirun", "-np", str(n)]
if bind_to_core:
args += ["-bind-to", "core"]
args += [sys.executable] + sys.argv
subprocess.check_call(args, env=env)
return "parent"
else:
return "child"
| 668 | 26.875 | 66 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/dataset.py
|
import numpy as np
class Dataset(object):
def __init__(self, data_map, deterministic=False, shuffle=True):
self.data_map = data_map
self.deterministic = deterministic
self.enable_shuffle = shuffle
self.n = next(iter(data_map.values())).shape[0]
self._next_id = 0
self.shuffle()
def shuffle(self):
if self.deterministic:
return
perm = np.arange(self.n)
np.random.shuffle(perm)
for key in self.data_map:
self.data_map[key] = self.data_map[key][perm]
self._next_id = 0
def next_batch(self, batch_size):
if self._next_id >= self.n and self.enable_shuffle:
self.shuffle()
cur_id = self._next_id
cur_batch_size = min(batch_size, self.n - self._next_id)
self._next_id += cur_batch_size
data_map = dict()
for key in self.data_map:
data_map[key] = self.data_map[key][cur_id:cur_id+cur_batch_size]
return data_map
def iterate_once(self, batch_size):
if self.enable_shuffle: self.shuffle()
while self._next_id <= self.n - batch_size:
yield self.next_batch(batch_size)
self._next_id = 0
def subset(self, num_elements, deterministic=True):
data_map = dict()
for key in self.data_map:
data_map[key] = self.data_map[key][:num_elements]
return Dataset(data_map, deterministic)
def iterbatches(arrays, *, num_batches=None, batch_size=None, shuffle=True, include_final_partial_batch=True):
assert (num_batches is None) != (batch_size is None), 'Provide num_batches or batch_size, but not both'
arrays = tuple(map(np.asarray, arrays))
n = arrays[0].shape[0]
assert all(a.shape[0] == n for a in arrays[1:])
inds = np.arange(n)
if shuffle: np.random.shuffle(inds)
sections = np.arange(0, n, batch_size)[1:] if num_batches is None else num_batches
for batch_inds in np.array_split(inds, sections):
if include_final_partial_batch or len(batch_inds) == batch_size:
yield tuple(a[batch_inds] for a in arrays)
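# --- Hedged usage sketch (not part of the original file): iterates aligned
# minibatches over two arrays; the data below is made up for illustration.
def _example_iterbatches():
    xs, ys = np.arange(10), np.arange(10) * 2
    for bx, by in iterbatches((xs, ys), batch_size=4, shuffle=False):
        assert np.all(by == bx * 2)  # rows stay aligned across arrays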
| 2,132 | 33.967213 | 110 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/math_util.py
|
import numpy as np
import scipy.signal
def discount(x, gamma):
"""
computes discounted sums along 0th dimension of x.
inputs
------
x: ndarray
gamma: float
outputs
-------
y: ndarray with same shape as x, satisfying
y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... + gamma^k x[t+k],
where k = len(x) - t - 1
"""
assert x.ndim >= 1
return scipy.signal.lfilter([1],[1,-gamma],x[::-1], axis=0)[::-1]
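# --- Hedged example (not part of the original file): with gamma=0.5 the discounted
# sums of [1, 1, 1] are [1 + 0.5 + 0.25, 1 + 0.5, 1].
def _example_discount():
    y = discount(np.array([1.0, 1.0, 1.0]), 0.5)
    assert np.allclose(y, [1.75, 1.5, 1.0])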
def explained_variance(ypred,y):
"""
Computes fraction of variance that ypred explains about y.
Returns 1 - Var[y-ypred] / Var[y]
interpretation:
ev=0 => might as well have predicted zero
ev=1 => perfect prediction
ev<0 => worse than just predicting zero
"""
assert y.ndim == 1 and ypred.ndim == 1
vary = np.var(y)
return np.nan if vary==0 else 1 - np.var(y-ypred)/vary
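# --- Hedged example (not part of the original file): a perfect prediction explains
# all of the variance, a constant prediction (the mean) explains none of it.
def _example_explained_variance():
    y = np.array([1.0, 2.0, 3.0, 4.0])
    assert np.isclose(explained_variance(y.copy(), y), 1.0)
    assert np.isclose(explained_variance(np.full_like(y, y.mean()), y), 0.0)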
def explained_variance_2d(ypred, y):
assert y.ndim == 2 and ypred.ndim == 2
vary = np.var(y, axis=0)
out = 1 - np.var(y-ypred, axis=0)/vary  # per-column explained variance
out[vary < 1e-10] = 0
return out
def ncc(ypred, y):
return np.corrcoef(ypred, y)[1,0]
def flatten_arrays(arrs):
return np.concatenate([arr.flat for arr in arrs])
def unflatten_vector(vec, shapes):
i=0
arrs = []
for shape in shapes:
size = np.prod(shape)
arr = vec[i:i+size].reshape(shape)
arrs.append(arr)
i += size
return arrs
def discount_with_boundaries(X, New, gamma):
"""
X: 2d array of floats, time x features
New: 2d array of bools, indicating when a new episode has started
"""
Y = np.zeros_like(X)
T = X.shape[0]
Y[T-1] = X[T-1]
for t in range(T-2, -1, -1):
Y[t] = X[t] + gamma * Y[t+1] * (1 - New[t+1])
return Y
def test_discount_with_boundaries():
gamma=0.9
x = np.array([1.0, 2.0, 3.0, 4.0], 'float32')
starts = [1.0, 0.0, 0.0, 1.0]
y = discount_with_boundaries(x, starts, gamma)
assert np.allclose(y, [
1 + gamma * 2 + gamma**2 * 3,
2 + gamma * 3,
3,
4
])
| 2,093 | 23.635294 | 75 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/tf_util.py
|
import joblib
import numpy as np
import tensorflow as tf # pylint: ignore-module
import copy
import os
import functools
import collections
import multiprocessing
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
# ================================================================
# Global session
# ================================================================
def get_session(config=None):
"""Get default session or create one with a given config"""
sess = tf.get_default_session()
if sess is None:
sess = make_session(config=config, make_default=True)
return sess
def make_session(config=None, num_cpu=None, make_default=False, graph=None):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
if config is None:
config = tf.ConfigProto(
allow_soft_placement=True,
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
config.gpu_options.allow_growth = True
if make_default:
return tf.InteractiveSession(config=config, graph=graph)
else:
return tf.Session(config=config, graph=graph)
def single_threaded_session():
"""Returns a session which will only use a single CPU"""
return make_session(num_cpu=1)
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.Session():
f(*args, **kwargs)
return newfunc
ALREADY_INITIALIZED = set()
def initialize():
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
get_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
# ================================================================
# Model components
# ================================================================
def normc_initializer(std=1.0, axis=0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(dtype.as_numpy_dtype)
out *= std / np.sqrt(np.square(out).sum(axis=axis, keepdims=True))
return tf.constant(out)
return _initializer
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
summary_tag=None):
with tf.variable_scope(name):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = intprod(filter_shape[:3])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = intprod(filter_shape[:2]) * num_filters
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
collections=collections)
b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer(),
collections=collections)
if summary_tag is not None:
tf.summary.image(summary_tag,
tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
[2, 0, 1, 3]),
max_outputs=10)  # tf.summary.image expects max_outputs, not max_images
return tf.nn.conv2d(x, w, stride_shape, pad) + b
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0]
class _Function(object):
def __init__(self, inputs, outputs, updates, givens):
for inpt in inputs:
if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
self.inputs = inputs
updates = updates or []
self.update_group = tf.group(*updates)
self.outputs_update = list(outputs) + [self.update_group]
self.givens = {} if givens is None else givens
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = adjust_shape(inpt, value)
def __call__(self, *args):
assert len(args) <= len(self.inputs), "Too many arguments provided"
feed_dict = {}
# Update the args
for inpt, value in zip(self.inputs, args):
self._feed_input(feed_dict, inpt, value)
# Update feed dict with givens.
for inpt in self.givens:
feed_dict[inpt] = adjust_shape(inpt, feed_dict.get(inpt, self.givens[inpt]))
results = get_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return tf.get_default_session().run(self.op)
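# --- Hedged usage sketch (not part of the original file): round-trips variable
# values through GetFlat/SetFromFlat; the variable name 'w_example' is hypothetical.
def _example_flat_roundtrip():
    with tf.Session() as sess:
        v = tf.get_variable("w_example", initializer=np.ones((2, 3), dtype=np.float32))
        sess.run(tf.variables_initializer([v]))
        get_flat, set_flat = GetFlat([v]), SetFromFlat([v])
        set_flat(get_flat() * 2.0)          # scale all entries of v through the flat view
        assert np.allclose(get_flat(), 2.0)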
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
# =============================================================
# TF placeholders management
# ============================================================
_PLACEHOLDER_CACHE = {} # name -> (placeholder, dtype, shape)
def get_placeholder(name, dtype, shape):
if name in _PLACEHOLDER_CACHE:
out, dtype1, shape1 = _PLACEHOLDER_CACHE[name]
if out.graph == tf.get_default_graph():
assert dtype1 == dtype and shape1 == shape, \
'Placeholder with name {} has already been registered and has shape {}, different from requested {}'.format(name, shape1, shape)
return out
out = tf.placeholder(dtype=dtype, shape=shape, name=name)
_PLACEHOLDER_CACHE[name] = (out, dtype, shape)
return out
def get_placeholder_cached(name):
return _PLACEHOLDER_CACHE[name][0]
# ================================================================
# Diagnostics
# ================================================================
def display_var_info(vars):
from baselines import logger
count_params = 0
for v in vars:
name = v.name
if "/Adam" in name or "beta1_power" in name or "beta2_power" in name: continue
v_params = np.prod(v.shape.as_list())
count_params += v_params
if "/b:" in name or "/biases" in name: continue # Wx+b, bias is not interesting to look at => count params, but not print
logger.info(" %s%s %i params %s" % (name, " "*(55-len(name)), v_params, str(v.shape)))
logger.info("Total model parameters: %0.2f million" % (count_params*1e-6))
def get_available_gpus():
# recipe from here:
# https://stackoverflow.com/questions/38559755/how-to-get-current-available-gpus-in-tensorflow?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
# ================================================================
# Saving variables
# ================================================================
def load_state(fname, sess=None):
sess = sess or get_session()
saver = tf.train.Saver()
saver.restore(tf.get_default_session(), fname)
def save_state(fname, sess=None):
sess = sess or get_session()
os.makedirs(os.path.dirname(fname), exist_ok=True)
saver = tf.train.Saver()
saver.save(tf.get_default_session(), fname)
# The methods above and below are clearly doing the same thing, and in a rather similar way
# TODO: ensure there is no subtle differences and remove one
def save_variables(save_path, variables=None, sess=None):
sess = sess or get_session()
variables = variables or tf.trainable_variables()
ps = sess.run(variables)
save_dict = {v.name: value for v, value in zip(variables, ps)}
os.makedirs(os.path.dirname(save_path), exist_ok=True)
joblib.dump(save_dict, save_path)
def load_variables(load_path, variables=None, sess=None):
sess = sess or get_session()
variables = variables or tf.trainable_variables()
loaded_params = joblib.load(os.path.expanduser(load_path))
restores = []
for v in variables:
restores.append(v.assign(loaded_params[v.name]))
sess.run(restores)
# ================================================================
# Shape adjustment for feeding into tf placeholders
# ================================================================
def adjust_shape(placeholder, data):
'''
adjust shape of the data to the shape of the placeholder if possible.
If shape is incompatible, AssertionError is thrown
Parameters:
placeholder tensorflow input placeholder
data input data to be (potentially) reshaped to be fed into placeholder
Returns:
reshaped data
'''
if not isinstance(data, np.ndarray) and not isinstance(data, list):
return data
if isinstance(data, list):
data = np.array(data)
placeholder_shape = [x or -1 for x in placeholder.shape.as_list()]
assert _check_shape(placeholder_shape, data.shape), \
'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, placeholder_shape)
return np.reshape(data, placeholder_shape)
def _check_shape(placeholder_shape, data_shape):
''' check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension)'''
return True  # NOTE: early return disables the check below; all shapes are accepted
squeezed_placeholder_shape = _squeeze_shape(placeholder_shape)
squeezed_data_shape = _squeeze_shape(data_shape)
for i, s_data in enumerate(squeezed_data_shape):
s_placeholder = squeezed_placeholder_shape[i]
if s_placeholder != -1 and s_data != s_placeholder:
return False
return True
def _squeeze_shape(shape):
return [x for x in shape if x != 1]
# ================================================================
# Tensorboard interfacing
# ================================================================
def launch_tensorboard_in_background(log_dir):
from tensorboard import main as tb
import threading
tf.flags.FLAGS.logdir = log_dir
t = threading.Thread(target=tb.main, args=([]))
t.start()
| 15,157 | 36.334975 | 171 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/tile_images.py
|
import numpy as np
def tile_images(img_nhwc):
"""
Tile N images into one big PxQ image
(P,Q) are chosen to be as close as possible, and if N
is square, then P=Q.
input: img_nhwc, list or array of images, ndim=4 once turned into array
n = batch index, h = height, w = width, c = channel
returns:
bigim_HWc, ndarray with ndim=3
"""
img_nhwc = np.asarray(img_nhwc)
N, h, w, c = img_nhwc.shape
H = int(np.ceil(np.sqrt(N)))
W = int(np.ceil(float(N)/H))
img_nhwc = np.array(list(img_nhwc) + [img_nhwc[0]*0 for _ in range(N, H*W)])
img_HWhwc = img_nhwc.reshape(H, W, h, w, c)
img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4)
img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c)
return img_Hh_Ww_c
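# --- Hedged usage sketch (not part of the original file): tiles 6 random RGB frames
# into a 3x2 grid; the frame size is arbitrary.
def _example_tile_images():
    frames = np.random.randint(0, 255, size=(6, 32, 32, 3), dtype=np.uint8)
    assert tile_images(frames).shape == (3 * 32, 2 * 32, 3)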
| 763 | 30.833333 | 80 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/running_mean_std.py
|
import tensorflow as tf
import numpy as np
from baselines.common.tf_util import get_session
class RunningMeanStd(object):
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
def __init__(self, epsilon=1e-4, shape=()):
self.mean = np.zeros(shape, 'float64')
self.var = np.ones(shape, 'float64')
self.count = epsilon
def update(self, x):
batch_mean = np.mean(x, axis=0)
batch_var = np.var(x, axis=0)
batch_count = x.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
self.mean, self.var, self.count = update_mean_var_count_from_moments(
self.mean, self.var, self.count, batch_mean, batch_var, batch_count)
def update_mean_var_count_from_moments(mean, var, count, batch_mean, batch_var, batch_count):
delta = batch_mean - mean
tot_count = count + batch_count
new_mean = mean + delta * batch_count / tot_count
m_a = var * count
m_b = batch_var * batch_count
M2 = m_a + m_b + np.square(delta) * count * batch_count / (count + batch_count)
new_var = M2 / (count + batch_count)
new_count = batch_count + count
return new_mean, new_var, new_count
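# --- Hedged example (not part of the original file): merging the moments of two
# chunks with the parallel formula above matches the moments of the concatenation.
def _example_moment_merge():
    a, b = np.random.randn(100), np.random.randn(50)
    mean, var, count = update_mean_var_count_from_moments(
        a.mean(), a.var(), len(a), b.mean(), b.var(), len(b))
    full = np.concatenate([a, b])
    assert np.allclose([mean, var, count], [full.mean(), full.var(), len(full)])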
class TfRunningMeanStd(object):
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
'''
TensorFlow variables-based implementation of computing running mean and std
Benefit of this implementation is that it can be saved / loaded together with the tensorflow model
'''
def __init__(self, epsilon=1e-4, shape=(), scope=''):
sess = get_session()
self._new_mean = tf.placeholder(shape=shape, dtype=tf.float64)
self._new_var = tf.placeholder(shape=shape, dtype=tf.float64)
self._new_count = tf.placeholder(shape=(), dtype=tf.float64)
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
self._mean = tf.get_variable('mean', initializer=np.zeros(shape, 'float64'), dtype=tf.float64)
self._var = tf.get_variable('std', initializer=np.ones(shape, 'float64'), dtype=tf.float64)
self._count = tf.get_variable('count', initializer=np.full((), epsilon, 'float64'), dtype=tf.float64)
self.update_ops = tf.group([
self._var.assign(self._new_var),
self._mean.assign(self._new_mean),
self._count.assign(self._new_count)
])
sess.run(tf.variables_initializer([self._mean, self._var, self._count]))
self.sess = sess
self._set_mean_var_count()
def _set_mean_var_count(self):
self.mean, self.var, self.count = self.sess.run([self._mean, self._var, self._count])
def update(self, x):
batch_mean = np.mean(x, axis=0)
batch_var = np.var(x, axis=0)
batch_count = x.shape[0]
new_mean, new_var, new_count = update_mean_var_count_from_moments(self.mean, self.var, self.count, batch_mean, batch_var, batch_count)
self.sess.run(self.update_ops, feed_dict={
self._new_mean: new_mean,
self._new_var: new_var,
self._new_count: new_count
})
self._set_mean_var_count()
def test_runningmeanstd():
for (x1, x2, x3) in [
(np.random.randn(3), np.random.randn(4), np.random.randn(5)),
(np.random.randn(3,2), np.random.randn(4,2), np.random.randn(5,2)),
]:
rms = RunningMeanStd(epsilon=0.0, shape=x1.shape[1:])
x = np.concatenate([x1, x2, x3], axis=0)
ms1 = [x.mean(axis=0), x.var(axis=0)]
rms.update(x1)
rms.update(x2)
rms.update(x3)
ms2 = [rms.mean, rms.var]
np.testing.assert_allclose(ms1, ms2)
def test_tf_runningmeanstd():
for (x1, x2, x3) in [
(np.random.randn(3), np.random.randn(4), np.random.randn(5)),
(np.random.randn(3,2), np.random.randn(4,2), np.random.randn(5,2)),
]:
rms = TfRunningMeanStd(epsilon=0.0, shape=x1.shape[1:], scope='running_mean_std' + str(np.random.randint(0, 128)))
x = np.concatenate([x1, x2, x3], axis=0)
ms1 = [x.mean(axis=0), x.var(axis=0)]
rms.update(x1)
rms.update(x2)
rms.update(x3)
ms2 = [rms.mean, rms.var]
np.testing.assert_allclose(ms1, ms2)
def profile_tf_runningmeanstd():
import time
from baselines.common import tf_util
tf_util.get_session( config=tf.ConfigProto(
inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1,
allow_soft_placement=True
))
x = np.random.random((376,))
n_trials = 10000
rms = RunningMeanStd()
tfrms = TfRunningMeanStd()
tic1 = time.time()
for _ in range(n_trials):
rms.update(x)
tic2 = time.time()
for _ in range(n_trials):
tfrms.update(x)
tic3 = time.time()
print('rms update time ({} trials): {} s'.format(n_trials, tic2 - tic1))
print('tfrms update time ({} trials): {} s'.format(n_trials, tic3 - tic2))
tic1 = time.time()
for _ in range(n_trials):
z1 = rms.mean
tic2 = time.time()
for _ in range(n_trials):
z2 = tfrms.mean
assert z1 == z2
tic3 = time.time()
print('rms get mean time ({} trials): {} s'.format(n_trials, tic2 - tic1))
print('tfrms get mean time ({} trials): {} s'.format(n_trials, tic3 - tic2))
'''
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) #pylint: disable=E1101
run_metadata = tf.RunMetadata()
profile_opts = dict(options=options, run_metadata=run_metadata)
from tensorflow.python.client import timeline
fetched_timeline = timeline.Timeline(run_metadata.step_stats) #pylint: disable=E1101
chrome_trace = fetched_timeline.generate_chrome_trace_format()
outfile = '/tmp/timeline.json'
with open(outfile, 'wt') as f:
f.write(chrome_trace)
print(f'Successfully saved profile to {outfile}. Exiting.')
exit(0)
'''
if __name__ == '__main__':
profile_tf_runningmeanstd()
| 6,200 | 31.984043 | 142 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/retro_wrappers.py
|
# flake8: noqa F403, F405
from .atari_wrappers import *
import numpy as np
import gym
class TimeLimit(gym.Wrapper):
def __init__(self, env, max_episode_steps=None):
super(TimeLimit, self).__init__(env)
self._max_episode_steps = max_episode_steps
self._elapsed_steps = 0
def step(self, ac):
observation, reward, done, info = self.env.step(ac)
self._elapsed_steps += 1
if self._elapsed_steps >= self._max_episode_steps:
done = True
info['TimeLimit.truncated'] = True
return observation, reward, done, info
def reset(self, **kwargs):
self._elapsed_steps = 0
return self.env.reset(**kwargs)
class StochasticFrameSkip(gym.Wrapper):
def __init__(self, env, n, stickprob):
gym.Wrapper.__init__(self, env)
self.n = n
self.stickprob = stickprob
self.curac = None
self.rng = np.random.RandomState()
self.supports_want_render = hasattr(env, "supports_want_render")
def reset(self, **kwargs):
self.curac = None
return self.env.reset(**kwargs)
def step(self, ac):
done = False
totrew = 0
for i in range(self.n):
# First step after reset, use action
if self.curac is None:
self.curac = ac
# First substep, delay with probability=stickprob
elif i==0:
if self.rng.rand() > self.stickprob:
self.curac = ac
# Second substep, new action definitely kicks in
elif i==1:
self.curac = ac
if self.supports_want_render and i<self.n-1:
ob, rew, done, info = self.env.step(self.curac, want_render=False)
else:
ob, rew, done, info = self.env.step(self.curac)
totrew += rew
if done: break
return ob, totrew, done, info
def seed(self, s):
self.rng.seed(s)
class PartialFrameStack(gym.Wrapper):
def __init__(self, env, k, channel=1):
"""
Stack one channel (channel keyword) from previous frames
"""
gym.Wrapper.__init__(self, env)
shp = env.observation_space.shape
self.channel = channel
self.observation_space = gym.spaces.Box(low=0, high=255,
shape=(shp[0], shp[1], shp[2] + k - 1),
dtype=env.observation_space.dtype)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
def reset(self):
ob = self.env.reset()
assert ob.shape[2] > self.channel
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, ac):
ob, reward, done, info = self.env.step(ac)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return np.concatenate([frame if i==self.k-1 else frame[:,:,self.channel:self.channel+1]
for (i, frame) in enumerate(self.frames)], axis=2)
class Downsample(gym.ObservationWrapper):
def __init__(self, env, ratio):
"""
Downsample images by a factor of ratio
"""
gym.ObservationWrapper.__init__(self, env)
(oldh, oldw, oldc) = env.observation_space.shape
newshape = (oldh//ratio, oldw//ratio, oldc)
self.observation_space = spaces.Box(low=0, high=255,
shape=newshape, dtype=np.uint8)
def observation(self, frame):
height, width, _ = self.observation_space.shape
frame = cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)
if frame.ndim == 2:
frame = frame[:,:,None]
return frame
class Rgb2gray(gym.ObservationWrapper):
def __init__(self, env):
"""
Convert RGB observations to grayscale
"""
gym.ObservationWrapper.__init__(self, env)
(oldh, oldw, _oldc) = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255,
shape=(oldh, oldw, 1), dtype=np.uint8)
def observation(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
return frame[:,:,None]
class MovieRecord(gym.Wrapper):
def __init__(self, env, savedir, k):
gym.Wrapper.__init__(self, env)
self.savedir = savedir
self.k = k
self.epcount = 0
def reset(self):
if self.epcount % self.k == 0:
print('saving movie this episode', self.savedir)
self.env.unwrapped.movie_path = self.savedir
else:
print('not saving this episode')
self.env.unwrapped.movie_path = None
self.env.unwrapped.movie = None
self.epcount += 1
return self.env.reset()
class AppendTimeout(gym.Wrapper):
def __init__(self, env):
gym.Wrapper.__init__(self, env)
self.action_space = env.action_space
self.timeout_space = gym.spaces.Box(low=np.array([0.0]), high=np.array([1.0]), dtype=np.float32)
self.original_os = env.observation_space
if isinstance(self.original_os, gym.spaces.Dict):
import copy
ordered_dict = copy.deepcopy(self.original_os.spaces)
ordered_dict['value_estimation_timeout'] = self.timeout_space
self.observation_space = gym.spaces.Dict(ordered_dict)
self.dict_mode = True
else:
self.observation_space = gym.spaces.Dict({
'original': self.original_os,
'value_estimation_timeout': self.timeout_space
})
self.dict_mode = False
self.ac_count = None
while 1:
if not hasattr(env, "_max_episode_steps"): # Looking for TimeLimit wrapper that has this field
env = env.env
continue
break
self.timeout = env._max_episode_steps
def step(self, ac):
self.ac_count += 1
ob, rew, done, info = self.env.step(ac)
return self._process(ob), rew, done, info
def reset(self):
self.ac_count = 0
return self._process(self.env.reset())
def _process(self, ob):
fracmissing = 1 - self.ac_count / self.timeout
if self.dict_mode:
ob['value_estimation_timeout'] = fracmissing
return ob
else:
return { 'original': ob, 'value_estimation_timeout': fracmissing }
class StartDoingRandomActionsWrapper(gym.Wrapper):
"""
Warning: can eat info dicts, not good if you depend on them
"""
def __init__(self, env, max_random_steps, on_startup=True, every_episode=False):
gym.Wrapper.__init__(self, env)
self.on_startup = on_startup
self.every_episode = every_episode
self.random_steps = max_random_steps
self.last_obs = None
if on_startup:
self.some_random_steps()
def some_random_steps(self):
self.last_obs = self.env.reset()
n = np.random.randint(self.random_steps)
#print("running for random %i frames" % n)
for _ in range(n):
self.last_obs, _, done, _ = self.env.step(self.env.action_space.sample())
if done: self.last_obs = self.env.reset()
def reset(self):
return self.last_obs
def step(self, a):
self.last_obs, rew, done, info = self.env.step(a)
if done:
self.last_obs = self.env.reset()
if self.every_episode:
self.some_random_steps()
return self.last_obs, rew, done, info
def make_retro(*, game, state, max_episode_steps, **kwargs):
import retro
env = retro.make(game, state, **kwargs)
env = StochasticFrameSkip(env, n=4, stickprob=0.25)
if max_episode_steps is not None:
env = TimeLimit(env, max_episode_steps=max_episode_steps)
return env
def wrap_deepmind_retro(env, scale=True, frame_stack=4):
"""
Configure environment for retro games, using config similar to DeepMind-style Atari in wrap_deepmind
"""
env = WarpFrame(env)
env = ClipRewardEnv(env)
env = FrameStack(env, frame_stack)
if scale:
env = ScaledFloatFrame(env)
return env
class SonicDiscretizer(gym.ActionWrapper):
"""
Wrap a gym-retro environment and make it use discrete
actions for the Sonic game.
"""
def __init__(self, env):
super(SonicDiscretizer, self).__init__(env)
buttons = ["B", "A", "MODE", "START", "UP", "DOWN", "LEFT", "RIGHT", "C", "Y", "X", "Z"]
actions = [['LEFT'], ['RIGHT'], ['LEFT', 'DOWN'], ['RIGHT', 'DOWN'], ['DOWN'],
['DOWN', 'B'], ['B']]
self._actions = []
for action in actions:
arr = np.array([False] * 12)
for button in action:
arr[buttons.index(button)] = True
self._actions.append(arr)
self.action_space = gym.spaces.Discrete(len(self._actions))
def action(self, a): # pylint: disable=W0221
return self._actions[a].copy()
class RewardScaler(gym.RewardWrapper):
"""
Bring rewards to a reasonable scale for PPO.
This is incredibly important and affects performance
drastically.
"""
def __init__(self, env, scale=0.01):
super(RewardScaler, self).__init__(env)
self.scale = scale
def reward(self, reward):
return reward * self.scale
class AllowBacktracking(gym.Wrapper):
"""
Use deltas in max(X) as the reward, rather than deltas
in X. This way, agents are not discouraged too heavily
from exploring backwards if there is no way to advance
head-on in the level.
"""
def __init__(self, env):
super(AllowBacktracking, self).__init__(env)
self._cur_x = 0
self._max_x = 0
def reset(self, **kwargs): # pylint: disable=E0202
self._cur_x = 0
self._max_x = 0
return self.env.reset(**kwargs)
def step(self, action): # pylint: disable=E0202
obs, rew, done, info = self.env.step(action)
self._cur_x += rew
rew = max(0, self._cur_x - self._max_x)
self._max_x = max(self._max_x, self._cur_x)
return obs, rew, done, info
| 10,238 | 33.826531 | 107 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/running_stat.py
|
import numpy as np
# http://www.johndcook.com/blog/standard_deviation/
class RunningStat(object):
def __init__(self, shape):
self._n = 0
self._M = np.zeros(shape)
self._S = np.zeros(shape)
def push(self, x):
x = np.asarray(x)
assert x.shape == self._M.shape
self._n += 1
if self._n == 1:
self._M[...] = x
else:
oldM = self._M.copy()
self._M[...] = oldM + (x - oldM)/self._n
self._S[...] = self._S + (x - oldM)*(x - self._M)
@property
def n(self):
return self._n
@property
def mean(self):
return self._M
@property
def var(self):
return self._S/(self._n - 1) if self._n > 1 else np.square(self._M)
@property
def std(self):
return np.sqrt(self.var)
@property
def shape(self):
return self._M.shape
def test_running_stat():
for shp in ((), (3,), (3,4)):
li = []
rs = RunningStat(shp)
for _ in range(5):
val = np.random.randn(*shp)
rs.push(val)
li.append(val)
m = np.mean(li, axis=0)
assert np.allclose(rs.mean, m)
v = np.square(m) if (len(li) == 1) else np.var(li, ddof=1, axis=0)
assert np.allclose(rs.var, v)
| 1,320 | 27.106383 | 78 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/segment_tree.py
|
import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient ( O(log segment size) )
`reduce` operation which reduces `operation` over
a contiguous subsequence of items in the array.
Parameters
---------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
an operation for combining elements (e.g. sum, max);
must be associative over the set of possible values for
array elements, with neutral_element as its identity
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
end of the subsequence
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
arr[0] + arr[1] + ... + arr[i - 1] <= prefixsum
If array values are probabilities, this function
allows sampling indexes according to the discrete
probability distribution efficiently.
Parameters
----------
prefixsum: float
upper bound on the sum of array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
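# --- Hedged usage sketch (not part of the original file): uses SumSegmentTree the way
# a prioritized replay buffer would, mapping a draw over the total priority mass back
# to an index. The priorities below are made up.
def _example_sum_tree_sampling():
    tree = SumSegmentTree(capacity=4)
    for i, priority in enumerate([1.0, 0.5, 2.0, 0.5]):
        tree[i] = priority
    assert tree.sum() == 4.0
    # cumulative mass: [0,1)->0, [1,1.5)->1, [1.5,3.5)->2, [3.5,4)->3
    assert tree.find_prefixsum_idx(2.0) == 2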
| 4,899 | 32.561644 | 109 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/policies.py
|
import tensorflow as tf
from baselines.common import tf_util
from baselines.a2c.utils import fc
from baselines.common.distributions import make_pdtype
from baselines.common.input import observation_placeholder, encode_observation
from baselines.common.tf_util import adjust_shape
from baselines.common.mpi_running_mean_std import RunningMeanStd
from baselines.common.models import get_network_builder
import gym
class PolicyWithValue(object):
"""
Encapsulates fields and methods for RL policy and value function estimation with shared parameters
"""
def __init__(self, env, observations, latent, estimate_q=False, vf_latent=None, sess=None, **tensors):
"""
Parameters:
----------
env RL environment
observations tensorflow placeholder in which the observations will be fed
latent latent state from which policy distribution parameters should be inferred
vf_latent latent state from which value function should be inferred (if None, then latent is used)
sess tensorflow session to run calculations in (if None, default session is used)
**tensors tensorflow tensors for additional attributes such as state or mask
"""
self.X = observations
self.state = tf.constant([])
self.initial_state = None
self.__dict__.update(tensors)
vf_latent = vf_latent if vf_latent is not None else latent
vf_latent = tf.layers.flatten(vf_latent)
latent = tf.layers.flatten(latent)
self.pdtype = make_pdtype(env.action_space)
self.pd, self.pi = self.pdtype.pdfromlatent(latent, init_scale=0.01)
self.action = self.pd.sample()
self.neglogp = self.pd.neglogp(self.action)
self.sess = sess
if estimate_q:
assert isinstance(env.action_space, gym.spaces.Discrete)
self.q = fc(vf_latent, 'q', env.action_space.n)
self.vf = self.q
else:
self.vf = fc(vf_latent, 'vf', 1)
self.vf = self.vf[:,0]
def _evaluate(self, variables, observation, **extra_feed):
sess = self.sess or tf.get_default_session()
feed_dict = {self.X: adjust_shape(self.X, observation)}
for inpt_name, data in extra_feed.items():
if inpt_name in self.__dict__.keys():
inpt = self.__dict__[inpt_name]
if isinstance(inpt, tf.Tensor) and inpt._op.type == 'Placeholder':
feed_dict[inpt] = adjust_shape(inpt, data)
return sess.run(variables, feed_dict)
def step(self, observation, **extra_feed):
"""
Compute next action(s) given the observation(s)
Parameters:
----------
observation observation data (either single or a batch)
**extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
Returns:
-------
(action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple
"""
a, v, state, neglogp = self._evaluate([self.action, self.vf, self.state, self.neglogp], observation, **extra_feed)
if state.size == 0:
state = None
return a, v, state, neglogp
def value(self, ob, *args, **kwargs):
"""
Compute value estimate(s) given the observation(s)
Parameters:
----------
observation observation data (either single or a batch)
**extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
Returns:
-------
value estimate
"""
return self._evaluate(self.vf, ob, *args, **kwargs)
def save(self, save_path):
tf_util.save_state(save_path, sess=self.sess)
def load(self, load_path):
tf_util.load_state(load_path, sess=self.sess)
def build_policy(env, policy_network, value_network=None, normalize_observations=False, estimate_q=False, **policy_kwargs):
if isinstance(policy_network, str):
network_type = policy_network
policy_network = get_network_builder(network_type)(**policy_kwargs)
def policy_fn(nbatch=None, nsteps=None, sess=None, observ_placeholder=None):
ob_space = env.observation_space
X = observ_placeholder if observ_placeholder is not None else observation_placeholder(ob_space, batch_size=nbatch)
extra_tensors = {}
if normalize_observations and X.dtype == tf.float32:
encoded_x, rms = _normalize_clip_observation(X)
extra_tensors['rms'] = rms
else:
encoded_x = X
encoded_x = encode_observation(ob_space, encoded_x)
with tf.variable_scope('pi', reuse=tf.AUTO_REUSE):
policy_latent, recurrent_tensors = policy_network(encoded_x)
if recurrent_tensors is not None:
# recurrent architecture, need a few more steps
nenv = nbatch // nsteps
assert nenv > 0, 'Bad input for recurrent policy: batch size {} smaller than nsteps {}'.format(nbatch, nsteps)
policy_latent, recurrent_tensors = policy_network(encoded_x, nenv)
extra_tensors.update(recurrent_tensors)
_v_net = value_network
if _v_net is None or _v_net == 'shared':
vf_latent = policy_latent
else:
if _v_net == 'copy':
_v_net = policy_network
else:
assert callable(_v_net)
with tf.variable_scope('vf', reuse=tf.AUTO_REUSE):
vf_latent, _ = _v_net(encoded_x)
policy = PolicyWithValue(
env=env,
observations=X,
latent=policy_latent,
vf_latent=vf_latent,
sess=sess,
estimate_q=estimate_q,
**extra_tensors
)
return policy
return policy_fn
def _normalize_clip_observation(x, clip_range=[-5.0, 5.0]):
rms = RunningMeanStd(shape=x.shape[1:])
norm_x = tf.clip_by_value((x - rms.mean) / rms.std, min(clip_range), max(clip_range))
return norm_x, rms
| 6,337 | 34.211111 | 137 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/models.py
|
import numpy as np
import tensorflow as tf
from baselines.a2c import utils
from baselines.a2c.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch
from baselines.common.mpi_running_mean_std import RunningMeanStd
import tensorflow.contrib.layers as layers
def nature_cnn(unscaled_images, **conv_kwargs):
"""
CNN from Nature paper.
"""
scaled_images = tf.cast(unscaled_images, tf.float32) / 255.
activ = tf.nn.relu
h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2),
**conv_kwargs))
h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))
h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))
h3 = conv_to_fc(h3)
return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2)))
def mlp(num_layers=2, num_hidden=64, activation=tf.tanh):
"""
Simple fully connected layer policy. Separate stacks of fully-connected layers are used for policy and value function estimation.
More customized fully-connected policies can be obtained by using the PolicyWithValue class directly.
Parameters:
----------
num_layers: int number of fully-connected layers (default: 2)
num_hidden: int size of fully-connected layers (default: 64)
activation: activation function (default: tf.tanh)
Returns:
-------
function that builds fully connected network with a given input placeholder
"""
def network_fn(X):
h = tf.layers.flatten(X)
for i in range(num_layers):
h = activation(fc(h, 'mlp_fc{}'.format(i), nh=num_hidden, init_scale=np.sqrt(2)))
return h, None
return network_fn
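# --- Hedged usage sketch (not part of the original file): builds the mlp latent
# network on a hypothetical flat observation placeholder.
def _example_mlp_builder():
    x = tf.placeholder(tf.float32, [None, 8])
    latent, recurrent = mlp(num_layers=2, num_hidden=32)(x)
    assert recurrent is None and latent.shape.as_list() == [None, 32]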
def cnn(**conv_kwargs):
def network_fn(X):
return nature_cnn(X, **conv_kwargs), None
return network_fn
def cnn_small(**conv_kwargs):
def network_fn(X):
h = tf.cast(X, tf.float32) / 255.
activ = tf.nn.relu
h = activ(conv(h, 'c1', nf=8, rf=8, stride=4, init_scale=np.sqrt(2), **conv_kwargs))
h = activ(conv(h, 'c2', nf=16, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))
h = conv_to_fc(h)
h = activ(fc(h, 'fc1', nh=128, init_scale=np.sqrt(2)))
return h, None
return network_fn
def lstm(nlstm=128, layer_norm=False):
def network_fn(X, nenv=1):
nbatch = X.shape[0]
nsteps = nbatch // nenv
h = tf.layers.flatten(X)
M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
S = tf.placeholder(tf.float32, [nenv, 2*nlstm]) #states
xs = batch_to_seq(h, nenv, nsteps)
ms = batch_to_seq(M, nenv, nsteps)
if layer_norm:
h5, snew = utils.lnlstm(xs, ms, S, scope='lnlstm', nh=nlstm)
else:
h5, snew = utils.lstm(xs, ms, S, scope='lstm', nh=nlstm)
h = seq_to_batch(h5)
initial_state = np.zeros(S.shape.as_list(), dtype=float)
return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state}
return network_fn
def cnn_lstm(nlstm=128, layer_norm=False, **conv_kwargs):
def network_fn(X, nenv=1):
nbatch = X.shape[0]
nsteps = nbatch // nenv
h = nature_cnn(X, **conv_kwargs)
M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
S = tf.placeholder(tf.float32, [nenv, 2*nlstm]) #states
xs = batch_to_seq(h, nenv, nsteps)
ms = batch_to_seq(M, nenv, nsteps)
if layer_norm:
h5, snew = utils.lnlstm(xs, ms, S, scope='lnlstm', nh=nlstm)
else:
h5, snew = utils.lstm(xs, ms, S, scope='lstm', nh=nlstm)
h = seq_to_batch(h5)
initial_state = np.zeros(S.shape.as_list(), dtype=float)
return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state}
return network_fn
def cnn_lnlstm(nlstm=128, **conv_kwargs):
return cnn_lstm(nlstm, layer_norm=True, **conv_kwargs)
def conv_only(convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], **conv_kwargs):
'''
convolutions-only net
Parameters:
----------
convs: list of triples (filter_number, filter_size, stride) specifying parameters for each layer.
Returns:
function that takes tensorflow tensor as input and returns the output of the last convolutional layer
'''
def network_fn(X):
out = tf.cast(X, tf.float32) / 255.
with tf.variable_scope("convnet"):
for num_outputs, kernel_size, stride in convs:
out = layers.convolution2d(out,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
activation_fn=tf.nn.relu,
**conv_kwargs)
return out, None
return network_fn
def _normalize_clip_observation(x, clip_range=[-5.0, 5.0]):
rms = RunningMeanStd(shape=x.shape[1:])
norm_x = tf.clip_by_value((x - rms.mean) / rms.std, min(clip_range), max(clip_range))
return norm_x, rms
def get_network_builder(name):
# TODO: replace with reflection?
if name == 'cnn':
return cnn
elif name == 'cnn_small':
return cnn_small
elif name == 'conv_only':
return conv_only
elif name == 'mlp':
return mlp
elif name == 'lstm':
return lstm
elif name == 'cnn_lstm':
return cnn_lstm
elif name == 'cnn_lnlstm':
return cnn_lnlstm
else:
raise ValueError('Unknown network type: {}'.format(name))
| 5,749 | 31.303371 | 133 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/mpi_adam_optimizer.py
|
import numpy as np
import tensorflow as tf
from mpi4py import MPI
class MpiAdamOptimizer(tf.train.AdamOptimizer):
"""Adam optimizer that averages gradients across mpi processes."""
def __init__(self, comm, **kwargs):
self.comm = comm
tf.train.AdamOptimizer.__init__(self, **kwargs)
def compute_gradients(self, loss, var_list, **kwargs):
grads_and_vars = tf.train.AdamOptimizer.compute_gradients(self, loss, var_list, **kwargs)
grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]
flat_grad = tf.concat([tf.reshape(g, (-1,)) for g, v in grads_and_vars], axis=0)
shapes = [v.shape.as_list() for g, v in grads_and_vars]
sizes = [int(np.prod(s)) for s in shapes]
num_tasks = self.comm.Get_size()
buf = np.zeros(sum(sizes), np.float32)
def _collect_grads(flat_grad):
self.comm.Allreduce(flat_grad, buf, op=MPI.SUM)
np.divide(buf, float(num_tasks), out=buf)
return buf
avg_flat_grad = tf.py_func(_collect_grads, [flat_grad], tf.float32)
avg_flat_grad.set_shape(flat_grad.shape)
avg_grads = tf.split(avg_flat_grad, sizes, axis=0)
avg_grads_and_vars = [(tf.reshape(g, v.shape), v)
for g, (_, v) in zip(avg_grads, grads_and_vars)]
return avg_grads_and_vars
| 1,358 | 41.46875 | 97 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/identity_env.py
|
from gym import Env
from gym.spaces import Discrete
class IdentityEnv(Env):
def __init__(
self,
dim,
ep_length=100,
):
self.action_space = Discrete(dim)
self.reset()
def reset(self):
self._choose_next_state()
self.observation_space = self.action_space
return self.state
def step(self, actions):
rew = self._get_reward(actions)
self._choose_next_state()
return self.state, rew, False, {}
def _choose_next_state(self):
self.state = self.action_space.sample()
def _get_reward(self, actions):
return 1 if self.state == actions else 0
| 678 | 20.903226 | 50 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/__init__.py
|
# flake8: noqa F403
from baselines.common.console_util import *
from baselines.common.dataset import Dataset
from baselines.common.math_util import *
from baselines.common.misc_util import *
| 191 | 31 | 44 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/mpi_moments.py
|
from mpi4py import MPI
import numpy as np
from baselines.common import zipsame
def mpi_mean(x, axis=0, comm=None, keepdims=False):
x = np.asarray(x)
assert x.ndim > 0
if comm is None: comm = MPI.COMM_WORLD
xsum = x.sum(axis=axis, keepdims=keepdims)
n = xsum.size
localsum = np.zeros(n+1, x.dtype)
localsum[:n] = xsum.ravel()
localsum[n] = x.shape[axis]
globalsum = np.zeros_like(localsum)
comm.Allreduce(localsum, globalsum, op=MPI.SUM)
return globalsum[:n].reshape(xsum.shape) / globalsum[n], globalsum[n]
def mpi_moments(x, axis=0, comm=None, keepdims=False):
x = np.asarray(x)
assert x.ndim > 0
mean, count = mpi_mean(x, axis=axis, comm=comm, keepdims=True)
sqdiffs = np.square(x - mean)
meansqdiff, count1 = mpi_mean(sqdiffs, axis=axis, comm=comm, keepdims=True)
assert count1 == count
std = np.sqrt(meansqdiff)
if not keepdims:
newshape = mean.shape[:axis] + mean.shape[axis+1:]
mean = mean.reshape(newshape)
std = std.reshape(newshape)
return mean, std, count
def test_runningmeanstd():
import subprocess
subprocess.check_call(['mpirun', '-np', '3',
'python','-c',
'from baselines.common.mpi_moments import _helper_runningmeanstd; _helper_runningmeanstd()'])
def _helper_runningmeanstd():
comm = MPI.COMM_WORLD
np.random.seed(0)
for (triple,axis) in [
((np.random.randn(3), np.random.randn(4), np.random.randn(5)),0),
((np.random.randn(3,2), np.random.randn(4,2), np.random.randn(5,2)),0),
((np.random.randn(2,3), np.random.randn(2,4), np.random.randn(2,4)),1),
]:
x = np.concatenate(triple, axis=axis)
ms1 = [x.mean(axis=axis), x.std(axis=axis), x.shape[axis]]
ms2 = mpi_moments(triple[comm.Get_rank()],axis=axis)
for (a1,a2) in zipsame(ms1, ms2):
print(a1, a2)
assert np.allclose(a1, a2)
print("ok!")
| 1,963 | 31.196721 | 101 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/filters.py
|
from .running_stat import RunningStat
from collections import deque
import numpy as np
class Filter(object):
def __call__(self, x, update=True):
raise NotImplementedError
def reset(self):
pass
class IdentityFilter(Filter):
def __call__(self, x, update=True):
return x
class CompositionFilter(Filter):
def __init__(self, fs):
self.fs = fs
def __call__(self, x, update=True):
for f in self.fs:
x = f(x)
return x
def output_shape(self, input_space):
out = input_space.shape
for f in self.fs:
out = f.output_shape(out)
return out
class ZFilter(Filter):
"""
y = (x-mean)/std
using running estimates of mean,std
"""
def __init__(self, shape, demean=True, destd=True, clip=10.0):
self.demean = demean
self.destd = destd
self.clip = clip
self.rs = RunningStat(shape)
def __call__(self, x, update=True):
if update: self.rs.push(x)
if self.demean:
x = x - self.rs.mean
if self.destd:
x = x / (self.rs.std+1e-8)
if self.clip:
x = np.clip(x, -self.clip, self.clip)
return x
def output_shape(self, input_space):
return input_space.shape
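# --- Hedged usage sketch (not part of the original file): normalizes a stream of
# observations with running mean/std estimates; the input statistics are made up.
def _example_zfilter():
    zf = ZFilter(shape=(3,), clip=5.0)
    for _ in range(100):
        zf(np.random.randn(3) * 10.0 + 2.0)   # update running statistics
    out = zf(np.zeros(3), update=False)        # roughly (0 - mean) / std, clipped
    assert out.shape == (3,)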
class AddClock(Filter):
def __init__(self):
self.count = 0
def reset(self):
self.count = 0
def __call__(self, x, update=True):
return np.append(x, self.count/100.0)
def output_shape(self, input_space):
return (input_space.shape[0]+1,)
class FlattenFilter(Filter):
def __call__(self, x, update=True):
return x.ravel()
def output_shape(self, input_space):
return (int(np.prod(input_space.shape)),)
class Ind2OneHotFilter(Filter):
def __init__(self, n):
self.n = n
def __call__(self, x, update=True):
out = np.zeros(self.n)
out[x] = 1
return out
def output_shape(self, input_space):
return (input_space.n,)
class DivFilter(Filter):
def __init__(self, divisor):
self.divisor = divisor
def __call__(self, x, update=True):
return x / self.divisor
def output_shape(self, input_space):
return input_space.shape
class StackFilter(Filter):
def __init__(self, length):
self.stack = deque(maxlen=length)
def reset(self):
self.stack.clear()
def __call__(self, x, update=True):
self.stack.append(x)
while len(self.stack) < self.stack.maxlen:
self.stack.append(x)
return np.concatenate(self.stack, axis=-1)
def output_shape(self, input_space):
return input_space.shape[:-1] + (input_space.shape[-1] * self.stack.maxlen,)
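# Illustrative sketch (not part of the original module): StackFilter pads its
# deque with copies of the first observation, then concatenates the most
# recent `length` observations along the last axis.
def _demo_stack_filter():
    sf = StackFilter(length=3)
    out = sf(np.array([1.0]))
    assert np.array_equal(out, np.array([1.0, 1.0, 1.0]))
    out = sf(np.array([2.0]))
    assert np.array_equal(out, np.array([1.0, 1.0, 2.0]))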
| 2,742 | 26.707071 | 84 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/console_util.py
|
from __future__ import print_function
from contextlib import contextmanager
import numpy as np
import time
# ================================================================
# Misc
# ================================================================
def fmt_row(width, row, header=False):
out = " | ".join(fmt_item(x, width) for x in row)
if header: out = out + "\n" + "-"*len(out)
return out
def fmt_item(x, l):
if isinstance(x, np.ndarray):
assert x.ndim==0
x = x.item()
if isinstance(x, (float, np.float32, np.float64)):
v = abs(x)
if (v < 1e-4 or v > 1e+4) and v > 0:
rep = "%7.2e" % x
else:
rep = "%7.5f" % x
else: rep = str(x)
return " "*(l - len(rep)) + rep
color2num = dict(
gray=30,
red=31,
green=32,
yellow=33,
blue=34,
magenta=35,
cyan=36,
white=37,
crimson=38
)
def colorize(string, color, bold=False, highlight=False):
attr = []
num = color2num[color]
if highlight: num += 10
attr.append(str(num))
if bold: attr.append('1')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
MESSAGE_DEPTH = 0
@contextmanager
def timed(msg):
global MESSAGE_DEPTH #pylint: disable=W0603
print(colorize('\t'*MESSAGE_DEPTH + '=: ' + msg, color='magenta'))
tstart = time.time()
MESSAGE_DEPTH += 1
yield
MESSAGE_DEPTH -= 1
print(colorize('\t'*MESSAGE_DEPTH + "done in %.3f seconds"%(time.time() - tstart), color='magenta'))
| 1,504 | 24.083333 | 104 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/cmd_util.py
|
"""
Helpers for scripts like run_atari.py.
"""
import os
try:
from mpi4py import MPI
except ImportError:
MPI = None
import gym
from gym.wrappers import FlattenDictWrapper
from baselines import logger
from baselines.bench import Monitor
from baselines.common import set_global_seeds
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
def make_atari_env(env_id, num_env, seed, wrapper_kwargs=None, start_index=0):
"""
Create a wrapped, monitored SubprocVecEnv for Atari.
"""
if wrapper_kwargs is None: wrapper_kwargs = {}
mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
def make_env(rank): # pylint: disable=C0111
def _thunk():
env = make_atari(env_id)
env.seed(seed + 10000*mpi_rank + rank if seed is not None else None)
env = Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(), str(mpi_rank) + '.' + str(rank)))
return wrap_deepmind(env, **wrapper_kwargs)
return _thunk
set_global_seeds(seed)
return SubprocVecEnv([make_env(i + start_index) for i in range(num_env)])
def make_mujoco_env(env_id, seed, reward_scale=1.0):
"""
Create a wrapped, monitored gym.Env for MuJoCo.
"""
rank = MPI.COMM_WORLD.Get_rank()
myseed = seed + 1000 * rank if seed is not None else None
set_global_seeds(myseed)
env = gym.make(env_id)
env = Monitor(env, os.path.join(logger.get_dir(), str(rank)), allow_early_resets=True)
env.seed(seed)
if reward_scale != 1.0:
from baselines.common.retro_wrappers import RewardScaler
env = RewardScaler(env, reward_scale)
return env
def make_robotics_env(env_id, seed, rank=0):
"""
    Create a wrapped, monitored gym.Env for MuJoCo-based robotics tasks.
"""
set_global_seeds(seed)
env = gym.make(env_id)
env = FlattenDictWrapper(env, ['observation', 'desired_goal'])
env = Monitor(
env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)),
info_keywords=('is_success',))
env.seed(seed)
return env
def arg_parser():
"""
Create an empty argparse.ArgumentParser.
"""
import argparse
return argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
def atari_arg_parser():
"""
Create an argparse.ArgumentParser for run_atari.py.
"""
print('Obsolete - use common_arg_parser instead')
return common_arg_parser()
def mujoco_arg_parser():
print('Obsolete - use common_arg_parser instead')
return common_arg_parser()
def str2bool(v):
    # argparse is only imported inside arg_parser(), so import it here as well
    # to avoid a NameError when raising ArgumentTypeError
    import argparse
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')
def common_arg_parser():
"""
    Create an argparse.ArgumentParser with the arguments shared by the run scripts.
"""
parser = arg_parser()
parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')
parser.add_argument('--seed', help='RNG seed', type=int, default=2019)
parser.add_argument('--alg', help='Algorithm', type=str, default='ppo2')
    parser.add_argument('--num_timesteps', type=float, default=1e6)
parser.add_argument('--weight', help='weight of noise', type=float, default=0.1)
parser.add_argument('--normal', help='no noise', type=str2bool, default=True)
parser.add_argument('--surrogate', help='surrogate reward', type=str2bool, default=False)
parser.add_argument('--noise_type', help='noise type (norm_one, norm_all, max_one, anti_iden)', type=str, default='norm_one')
parser.add_argument('--network', help='network type (mlp, cnn, lstm, cnn_lstm, conv_only)', default=None)
parser.add_argument('--gamestate', help='game state to load (so far only used in retro games)', default=None)
parser.add_argument('--num_env', help='Number of environment copies being run in parallel. When not specified, set to number of cpus for Atari, and to 1 for Mujoco', default=None, type=int)
parser.add_argument('--reward_scale', help='Reward scale factor. Default: 1.0', default=1.0, type=float)
parser.add_argument('--save_path', help='Path to save trained model to', default=None, type=str)
parser.add_argument('--play', default=False, action='store_true')
return parser
def robotics_arg_parser():
"""
    Create an argparse.ArgumentParser for the robotics environments.
"""
parser = arg_parser()
parser.add_argument('--env', help='environment ID', type=str, default='FetchReach-v0')
parser.add_argument('--seed', help='RNG seed', type=int, default=None)
parser.add_argument('--num-timesteps', type=int, default=int(1e6))
return parser
def parse_unknown_args(args):
"""
    Parse arguments not consumed by arg parser into a dictionary
"""
retval = {}
for arg in args:
assert arg.startswith('--')
assert '=' in arg, 'cannot parse arg {}'.format(arg)
key = arg.split('=')[0][2:]
value = arg.split('=')[1]
retval[key] = value
return retval
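# Illustrative sketch (not part of the original module): parse_unknown_args turns
# leftover '--key=value' tokens into a plain dict of strings.
def _demo_parse_unknown_args():
    extra = parse_unknown_args(['--noise_type=norm_one', '--weight=0.2'])
    assert extra == {'noise_type': 'norm_one', 'weight': '0.2'}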
| 5,142 | 36 | 193 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/input.py
|
import tensorflow as tf
from gym.spaces import Discrete, Box
def observation_placeholder(ob_space, batch_size=None, name='Ob'):
'''
Create placeholder to feed observations into of the size appropriate to the observation space
Parameters:
----------
ob_space: gym.Space observation space
batch_size: int size of the batch to be fed into input. Can be left None in most cases.
name: str name of the placeholder
Returns:
-------
tensorflow placeholder tensor
'''
assert isinstance(ob_space, Discrete) or isinstance(ob_space, Box), \
'Can only deal with Discrete and Box observation spaces for now'
return tf.placeholder(shape=(batch_size,) + ob_space.shape, dtype=ob_space.dtype, name=name)
def observation_input(ob_space, batch_size=None, name='Ob'):
'''
Create placeholder to feed observations into of the size appropriate to the observation space, and add input
encoder of the appropriate type.
'''
placeholder = observation_placeholder(ob_space, batch_size, name)
return placeholder, encode_observation(ob_space, placeholder)
def encode_observation(ob_space, placeholder):
'''
Encode input in the way that is appropriate to the observation space
Parameters:
----------
ob_space: gym.Space observation space
placeholder: tf.placeholder observation input placeholder
'''
if isinstance(ob_space, Discrete):
return tf.to_float(tf.one_hot(placeholder, ob_space.n))
elif isinstance(ob_space, Box):
return tf.to_float(placeholder)
else:
raise NotImplementedError
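# Illustrative sketch (not part of the original module), assuming TF1-style graph
# mode as used throughout baselines: build a placeholder plus encoder for a Box
# space and feed a dummy batch through it.
def _demo_observation_input():
    import numpy as np
    space = Box(low=-1.0, high=1.0, shape=(4,), dtype=np.float32)
    ph, enc = observation_input(space, name='DemoOb')
    with tf.Session() as sess:
        out = sess.run(enc, {ph: np.zeros((2, 4), dtype=np.float32)})
    assert out.shape == (2, 4)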
| 1,686 | 28.596491 | 113 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/tests/test_tf_util.py
|
# tests for tf_util
import tensorflow as tf
from baselines.common.tf_util import (
function,
initialize,
single_threaded_session
)
def test_function():
with tf.Graph().as_default():
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(2, 2) == 10
def test_multikwargs():
with tf.Graph().as_default():
x = tf.placeholder(tf.int32, (), name="x")
with tf.variable_scope("other"):
x2 = tf.placeholder(tf.int32, (), name="x")
z = 3 * x + 2 * x2
lin = function([x, x2], z, givens={x2: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(2, 2) == 10
if __name__ == '__main__':
test_function()
test_multikwargs()
| 1,000 | 23.414634 | 55 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/tests/test_schedules.py
|
import numpy as np
from baselines.common.schedules import ConstantSchedule, PiecewiseSchedule
def test_piecewise_schedule():
ps = PiecewiseSchedule([(-5, 100), (5, 200), (10, 50), (100, 50), (200, -50)], outside_value=500)
assert np.isclose(ps.value(-10), 500)
assert np.isclose(ps.value(0), 150)
assert np.isclose(ps.value(5), 200)
assert np.isclose(ps.value(9), 80)
assert np.isclose(ps.value(50), 50)
assert np.isclose(ps.value(80), 50)
assert np.isclose(ps.value(150), 0)
assert np.isclose(ps.value(175), -25)
assert np.isclose(ps.value(201), 500)
assert np.isclose(ps.value(500), 500)
assert np.isclose(ps.value(200 - 1e-10), -50)
def test_constant_schedule():
cs = ConstantSchedule(5)
for i in range(-100, 100):
assert np.isclose(cs.value(i), 5)
| 823 | 29.518519 | 101 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/tests/test_identity.py
|
import pytest
from baselines.common.tests.envs.identity_env import DiscreteIdentityEnv, BoxIdentityEnv
from baselines.run import get_learn_function
from baselines.common.tests.util import simple_test
common_kwargs = dict(
total_timesteps=30000,
network='mlp',
gamma=0.9,
seed=0,
)
learn_kwargs = {
'a2c' : {},
'acktr': {},
'deepq': {},
'ppo2': dict(lr=1e-3, nsteps=64, ent_coef=0.0),
'trpo_mpi': dict(timesteps_per_batch=100, cg_iters=10, gamma=0.9, lam=1.0, max_kl=0.01)
}
@pytest.mark.slow
@pytest.mark.parametrize("alg", learn_kwargs.keys())
def test_discrete_identity(alg):
'''
Test if the algorithm (with an mlp policy)
can learn an identity transformation (i.e. return observation as an action)
'''
kwargs = learn_kwargs[alg]
kwargs.update(common_kwargs)
learn_fn = lambda e: get_learn_function(alg)(env=e, **kwargs)
env_fn = lambda: DiscreteIdentityEnv(10, episode_len=100)
simple_test(env_fn, learn_fn, 0.9)
@pytest.mark.slow
@pytest.mark.parametrize("alg", ['a2c', 'ppo2', 'trpo_mpi'])
def test_continuous_identity(alg):
'''
Test if the algorithm (with an mlp policy)
can learn an identity transformation (i.e. return observation as an action)
to a required precision
'''
kwargs = learn_kwargs[alg]
kwargs.update(common_kwargs)
learn_fn = lambda e: get_learn_function(alg)(env=e, **kwargs)
env_fn = lambda: BoxIdentityEnv((1,), episode_len=100)
simple_test(env_fn, learn_fn, -0.1)
if __name__ == '__main__':
test_continuous_identity('a2c')
| 1,583 | 27.285714 | 91 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/tests/test_segment_tree.py
|
import numpy as np
from baselines.common.segment_tree import SumSegmentTree, MinSegmentTree
def test_tree_set():
tree = SumSegmentTree(4)
tree[2] = 1.0
tree[3] = 3.0
assert np.isclose(tree.sum(), 4.0)
assert np.isclose(tree.sum(0, 2), 0.0)
assert np.isclose(tree.sum(0, 3), 1.0)
assert np.isclose(tree.sum(2, 3), 1.0)
assert np.isclose(tree.sum(2, -1), 1.0)
assert np.isclose(tree.sum(2, 4), 4.0)
def test_tree_set_overlap():
tree = SumSegmentTree(4)
tree[2] = 1.0
tree[2] = 3.0
assert np.isclose(tree.sum(), 3.0)
assert np.isclose(tree.sum(2, 3), 3.0)
assert np.isclose(tree.sum(2, -1), 3.0)
assert np.isclose(tree.sum(2, 4), 3.0)
assert np.isclose(tree.sum(1, 2), 0.0)
def test_prefixsum_idx():
tree = SumSegmentTree(4)
tree[2] = 1.0
tree[3] = 3.0
assert tree.find_prefixsum_idx(0.0) == 2
assert tree.find_prefixsum_idx(0.5) == 2
assert tree.find_prefixsum_idx(0.99) == 2
assert tree.find_prefixsum_idx(1.01) == 3
assert tree.find_prefixsum_idx(3.00) == 3
assert tree.find_prefixsum_idx(4.00) == 3
def test_prefixsum_idx2():
tree = SumSegmentTree(4)
tree[0] = 0.5
tree[1] = 1.0
tree[2] = 1.0
tree[3] = 3.0
assert tree.find_prefixsum_idx(0.00) == 0
assert tree.find_prefixsum_idx(0.55) == 1
assert tree.find_prefixsum_idx(0.99) == 1
assert tree.find_prefixsum_idx(1.51) == 2
assert tree.find_prefixsum_idx(3.00) == 3
assert tree.find_prefixsum_idx(5.50) == 3
def test_max_interval_tree():
tree = MinSegmentTree(4)
tree[0] = 1.0
tree[2] = 0.5
tree[3] = 3.0
assert np.isclose(tree.min(), 0.5)
assert np.isclose(tree.min(0, 2), 1.0)
assert np.isclose(tree.min(0, 3), 0.5)
assert np.isclose(tree.min(0, -1), 0.5)
assert np.isclose(tree.min(2, 4), 0.5)
assert np.isclose(tree.min(3, 4), 3.0)
tree[2] = 0.7
assert np.isclose(tree.min(), 0.7)
assert np.isclose(tree.min(0, 2), 1.0)
assert np.isclose(tree.min(0, 3), 0.7)
assert np.isclose(tree.min(0, -1), 0.7)
assert np.isclose(tree.min(2, 4), 0.7)
assert np.isclose(tree.min(3, 4), 3.0)
tree[2] = 4.0
assert np.isclose(tree.min(), 1.0)
assert np.isclose(tree.min(0, 2), 1.0)
assert np.isclose(tree.min(0, 3), 1.0)
assert np.isclose(tree.min(0, -1), 1.0)
assert np.isclose(tree.min(2, 4), 3.0)
assert np.isclose(tree.min(2, 3), 4.0)
assert np.isclose(tree.min(2, -1), 4.0)
assert np.isclose(tree.min(3, 4), 3.0)
if __name__ == '__main__':
test_tree_set()
test_tree_set_overlap()
test_prefixsum_idx()
test_prefixsum_idx2()
test_max_interval_tree()
| 2,691 | 24.884615 | 72 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/tests/test_mnist.py
|
import pytest
# from baselines.acer import acer_simple as acer
from baselines.common.tests.envs.mnist_env import MnistEnv
from baselines.common.tests.util import simple_test
from baselines.run import get_learn_function
# TODO investigate a2c and ppo2 failures - is it due to bad hyperparameters for this problem?
# GitHub issue https://github.com/openai/baselines/issues/189
common_kwargs = {
'seed': 0,
'network':'cnn',
'gamma':0.9,
'pad':'SAME'
}
learn_args = {
'a2c': dict(total_timesteps=50000),
# TODO need to resolve inference (step) API differences for acer; also slow
# 'acer': dict(seed=0, total_timesteps=1000),
'deepq': dict(total_timesteps=5000),
'acktr': dict(total_timesteps=30000),
'ppo2': dict(total_timesteps=50000, lr=1e-3, nsteps=128, ent_coef=0.0),
'trpo_mpi': dict(total_timesteps=80000, timesteps_per_batch=100, cg_iters=10, lam=1.0, max_kl=0.001)
}
#tests pass, but are too slow on travis. Same algorithms are covered
# by other tests with less compute-hungry nn's and by benchmarks
@pytest.mark.skip
@pytest.mark.slow
@pytest.mark.parametrize("alg", learn_args.keys())
def test_mnist(alg):
'''
Test if the algorithm can learn to classify MNIST digits.
Uses CNN policy.
'''
learn_kwargs = learn_args[alg]
learn_kwargs.update(common_kwargs)
learn = get_learn_function(alg)
learn_fn = lambda e: learn(env=e, **learn_kwargs)
env_fn = lambda: MnistEnv(seed=0, episode_len=100)
simple_test(env_fn, learn_fn, 0.6)
if __name__ == '__main__':
test_mnist('deepq')
| 1,591 | 30.215686 | 104 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/tests/util.py
|
import tensorflow as tf
import numpy as np
from gym.spaces import np_random
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
N_TRIALS = 10000
N_EPISODES = 100
def simple_test(env_fn, learn_fn, min_reward_fraction, n_trials=N_TRIALS):
np.random.seed(0)
np_random.seed(0)
env = DummyVecEnv([env_fn])
with tf.Graph().as_default(), tf.Session(config=tf.ConfigProto(allow_soft_placement=True)).as_default():
tf.set_random_seed(0)
model = learn_fn(env)
sum_rew = 0
done = True
for i in range(n_trials):
if done:
obs = env.reset()
state = model.initial_state
if state is not None:
a, v, state, _ = model.step(obs, S=state, M=[False])
else:
a, v, _, _ = model.step(obs)
obs, rew, done, _ = env.step(a)
sum_rew += float(rew)
print("Reward in {} trials is {}".format(n_trials, sum_rew))
assert sum_rew > min_reward_fraction * n_trials, \
'sum of rewards {} is less than {} of the total number of trials {}'.format(sum_rew, min_reward_fraction, n_trials)
def reward_per_episode_test(env_fn, learn_fn, min_avg_reward, n_trials=N_EPISODES):
env = DummyVecEnv([env_fn])
with tf.Graph().as_default(), tf.Session(config=tf.ConfigProto(allow_soft_placement=True)).as_default():
model = learn_fn(env)
N_TRIALS = 100
observations, actions, rewards = rollout(env, model, N_TRIALS)
rewards = [sum(r) for r in rewards]
avg_rew = sum(rewards) / N_TRIALS
print("Average reward in {} episodes is {}".format(n_trials, avg_rew))
assert avg_rew > min_avg_reward, \
'average reward in {} episodes ({}) is less than {}'.format(n_trials, avg_rew, min_avg_reward)
def rollout(env, model, n_trials):
rewards = []
actions = []
observations = []
for i in range(n_trials):
obs = env.reset()
state = model.initial_state
episode_rew = []
episode_actions = []
episode_obs = []
while True:
if state is not None:
a, v, state, _ = model.step(obs, S=state, M=[False])
else:
                a, v, _, _ = model.step(obs)
obs, rew, done, _ = env.step(a)
episode_rew.append(rew)
episode_actions.append(a)
episode_obs.append(obs)
if done:
break
rewards.append(episode_rew)
actions.append(episode_actions)
observations.append(episode_obs)
return observations, actions, rewards
| 2,676 | 28.097826 | 127 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/tests/test_serialization.py
|
import os
import tempfile
import pytest
import tensorflow as tf
import numpy as np
from baselines.common.tests.envs.mnist_env import MnistEnv
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.run import get_learn_function
from baselines.common.tf_util import make_session, get_session
from functools import partial
learn_kwargs = {
'deepq': {},
'a2c': {},
'acktr': {},
'ppo2': {'nminibatches': 1, 'nsteps': 10},
'trpo_mpi': {},
}
network_kwargs = {
'mlp': {},
'cnn': {'pad': 'SAME'},
'lstm': {},
'cnn_lnlstm': {'pad': 'SAME'}
}
@pytest.mark.parametrize("learn_fn", learn_kwargs.keys())
@pytest.mark.parametrize("network_fn", network_kwargs.keys())
def test_serialization(learn_fn, network_fn):
'''
Test if the trained model can be serialized
'''
if network_fn.endswith('lstm') and learn_fn in ['acktr', 'trpo_mpi', 'deepq']:
# TODO make acktr work with recurrent policies
# and test
# github issue: https://github.com/openai/baselines/issues/194
return
env = DummyVecEnv([lambda: MnistEnv(10, episode_len=100)])
ob = env.reset().copy()
learn = get_learn_function(learn_fn)
kwargs = {}
kwargs.update(network_kwargs[network_fn])
kwargs.update(learn_kwargs[learn_fn])
learn = partial(learn, env=env, network=network_fn, seed=0, **kwargs)
with tempfile.TemporaryDirectory() as td:
model_path = os.path.join(td, 'serialization_test_model')
with tf.Graph().as_default(), make_session().as_default():
model = learn(total_timesteps=100)
model.save(model_path)
mean1, std1 = _get_action_stats(model, ob)
variables_dict1 = _serialize_variables()
with tf.Graph().as_default(), make_session().as_default():
model = learn(total_timesteps=0, load_path=model_path)
mean2, std2 = _get_action_stats(model, ob)
variables_dict2 = _serialize_variables()
for k, v in variables_dict1.items():
np.testing.assert_allclose(v, variables_dict2[k], atol=0.01,
err_msg='saved and loaded variable {} value mismatch'.format(k))
np.testing.assert_allclose(mean1, mean2, atol=0.5)
np.testing.assert_allclose(std1, std2, atol=0.5)
def _serialize_variables():
sess = get_session()
variables = tf.trainable_variables()
values = sess.run(variables)
return {var.name: value for var, value in zip(variables, values)}
def _get_action_stats(model, ob):
ntrials = 1000
if model.initial_state is None or model.initial_state == []:
actions = np.array([model.step(ob)[0] for _ in range(ntrials)])
else:
actions = np.array([model.step(ob, S=model.initial_state, M=[False])[0] for _ in range(ntrials)])
mean = np.mean(actions, axis=0)
std = np.std(actions, axis=0)
return mean, std
| 2,955 | 29.163265 | 105 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/tests/test_cartpole.py
|
import pytest
import gym
from baselines.run import get_learn_function
from baselines.common.tests.util import reward_per_episode_test
common_kwargs = dict(
total_timesteps=30000,
network='mlp',
gamma=1.0,
seed=0,
)
learn_kwargs = {
'a2c' : dict(nsteps=32, value_network='copy', lr=0.05),
'acktr': dict(nsteps=32, value_network='copy'),
'deepq': {},
'ppo2': dict(value_network='copy'),
'trpo_mpi': {}
}
@pytest.mark.slow
@pytest.mark.parametrize("alg", learn_kwargs.keys())
def test_cartpole(alg):
'''
Test if the algorithm (with an mlp policy)
can learn to balance the cartpole
'''
kwargs = common_kwargs.copy()
kwargs.update(learn_kwargs[alg])
learn_fn = lambda e: get_learn_function(alg)(env=e, **kwargs)
def env_fn():
env = gym.make('CartPole-v0')
env.seed(0)
return env
reward_per_episode_test(env_fn, learn_fn, 100)
| 937 | 21.878049 | 65 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/tests/test_fixed_sequence.py
|
import pytest
from baselines.common.tests.envs.fixed_sequence_env import FixedSequenceEnv
from baselines.common.tests.util import simple_test
from baselines.run import get_learn_function
common_kwargs = dict(
seed=0,
total_timesteps=50000,
)
learn_kwargs = {
'a2c': {},
'ppo2': dict(nsteps=10, ent_coef=0.0, nminibatches=1),
# TODO enable sequential models for trpo_mpi (proper handling of nbatch and nsteps)
# github issue: https://github.com/openai/baselines/issues/188
# 'trpo_mpi': lambda e, p: trpo_mpi.learn(policy_fn=p(env=e), env=e, max_timesteps=30000, timesteps_per_batch=100, cg_iters=10, gamma=0.9, lam=1.0, max_kl=0.001)
}
alg_list = learn_kwargs.keys()
rnn_list = ['lstm']
@pytest.mark.slow
@pytest.mark.parametrize("alg", alg_list)
@pytest.mark.parametrize("rnn", rnn_list)
def test_fixed_sequence(alg, rnn):
'''
Test if the algorithm (with a given policy)
can learn an identity transformation (i.e. return observation as an action)
'''
kwargs = learn_kwargs[alg]
kwargs.update(common_kwargs)
episode_len = 5
env_fn = lambda: FixedSequenceEnv(10, episode_len=episode_len)
learn = lambda e: get_learn_function(alg)(
env=e,
network=rnn,
**kwargs
)
simple_test(env_fn, learn, 0.7)
if __name__ == '__main__':
test_fixed_sequence('ppo2', 'lstm')
| 1,379 | 25.538462 | 165 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/tests/envs/mnist_env.py
|
import os.path as osp
import numpy as np
import tempfile
import filelock
from gym import Env
from gym.spaces import Discrete, Box
class MnistEnv(Env):
def __init__(
self,
seed=0,
episode_len=None,
no_images=None
):
from tensorflow.examples.tutorials.mnist import input_data
# we could use temporary directory for this with a context manager and
        # TemporaryDirectory, but then each test that uses mnist would re-download the data
# this way the data is not cleaned up, but we only download it once per machine
mnist_path = osp.join(tempfile.gettempdir(), 'MNIST_data')
with filelock.FileLock(mnist_path + '.lock'):
self.mnist = input_data.read_data_sets(mnist_path)
self.np_random = np.random.RandomState()
self.np_random.seed(seed)
self.observation_space = Box(low=0.0, high=1.0, shape=(28,28,1))
self.action_space = Discrete(10)
self.episode_len = episode_len
self.time = 0
self.no_images = no_images
self.train_mode()
self.reset()
def reset(self):
self._choose_next_state()
self.time = 0
return self.state[0]
def step(self, actions):
rew = self._get_reward(actions)
self._choose_next_state()
done = False
if self.episode_len and self.time >= self.episode_len:
rew = 0
done = True
return self.state[0], rew, done, {}
def train_mode(self):
self.dataset = self.mnist.train
def test_mode(self):
self.dataset = self.mnist.test
def _choose_next_state(self):
max_index = (self.no_images if self.no_images is not None else self.dataset.num_examples) - 1
index = self.np_random.randint(0, max_index)
image = self.dataset.images[index].reshape(28,28,1)*255
label = self.dataset.labels[index]
self.state = (image, label)
self.time += 1
def _get_reward(self, actions):
return 1 if self.state[1] == actions else 0
| 2,099 | 28.577465 | 101 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/tests/envs/fixed_sequence_env.py
|
import numpy as np
from gym import Env
from gym.spaces import Discrete
class FixedSequenceEnv(Env):
def __init__(
self,
n_actions=10,
seed=0,
episode_len=100
):
self.np_random = np.random.RandomState()
self.np_random.seed(seed)
self.sequence = [self.np_random.randint(0, n_actions-1) for _ in range(episode_len)]
self.action_space = Discrete(n_actions)
self.observation_space = Discrete(1)
self.episode_len = episode_len
self.time = 0
self.reset()
def reset(self):
self.time = 0
return 0
def step(self, actions):
rew = self._get_reward(actions)
self._choose_next_state()
done = False
if self.episode_len and self.time >= self.episode_len:
rew = 0
done = True
return 0, rew, done, {}
def _choose_next_state(self):
self.time += 1
def _get_reward(self, actions):
return 1 if actions == self.sequence[self.time] else 0
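# Illustrative sketch (not part of the original module): an agent that replays
# env.sequence earns reward 1 on every step except the final one, where the
# reward is zeroed out when the episode terminates.
def _demo_fixed_sequence_optimal():
    env = FixedSequenceEnv(n_actions=10, seed=0, episode_len=20)
    env.reset()
    total, done = 0, False
    while not done:
        _, rew, done, _ = env.step(env.sequence[env.time])
        total += rew
    assert total == env.episode_len - 1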
| 1,066 | 22.711111 | 92 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/tests/envs/identity_env.py
|
import numpy as np
from abc import abstractmethod
from gym import Env
from gym.spaces import Discrete, Box
class IdentityEnv(Env):
def __init__(
self,
episode_len=None
):
self.episode_len = episode_len
self.time = 0
self.reset()
def reset(self):
self._choose_next_state()
self.time = 0
self.observation_space = self.action_space
return self.state
def step(self, actions):
rew = self._get_reward(actions)
self._choose_next_state()
done = False
if self.episode_len and self.time >= self.episode_len:
rew = 0
done = True
return self.state, rew, done, {}
def _choose_next_state(self):
self.state = self.action_space.sample()
self.time += 1
@abstractmethod
def _get_reward(self, actions):
raise NotImplementedError
class DiscreteIdentityEnv(IdentityEnv):
def __init__(
self,
dim,
episode_len=None,
):
self.action_space = Discrete(dim)
super().__init__(episode_len=episode_len)
def _get_reward(self, actions):
return 1 if self.state == actions else 0
class BoxIdentityEnv(IdentityEnv):
def __init__(
self,
shape,
episode_len=None,
):
self.action_space = Box(low=-1.0, high=1.0, shape=shape)
super().__init__(episode_len=episode_len)
def _get_reward(self, actions):
diff = actions - self.state
diff = diff[:]
return -0.5 * np.dot(diff, diff)
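# Illustrative sketch (not part of the original module): echoing the observation
# back as the action earns reward 1 on every step except the terminal one.
def _demo_discrete_identity():
    env = DiscreteIdentityEnv(10, episode_len=5)
    ob = env.reset()
    total, done = 0, False
    while not done:
        ob, rew, done, _ = env.step(ob)
        total += rew
    assert total == env.episode_len - 1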
| 1,608 | 21.661972 | 64 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/tests/envs/__init__.py
| 0 | 0 | 0 |
py
|
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/vec_env/vec_normalize.py
|
from baselines.common.vec_env import VecEnvWrapper
from baselines.common.running_mean_std import RunningMeanStd
import numpy as np
class VecNormalize(VecEnvWrapper):
"""
    A vectorized environment wrapper that normalizes observations and
    discounted returns using running statistics
"""
def __init__(self, venv, ob=True, ret=True, clipob=10., cliprew=10., gamma=0.99, epsilon=1e-8):
VecEnvWrapper.__init__(self, venv)
self.ob_rms = RunningMeanStd(shape=self.observation_space.shape) if ob else None
self.ret_rms = RunningMeanStd(shape=()) if ret else None
#self.ob_rms = TfRunningMeanStd(shape=self.observation_space.shape, scope='observation_running_mean_std') if ob else None
#self.ret_rms = TfRunningMeanStd(shape=(), scope='return_running_mean_std') if ret else None
self.clipob = clipob
self.cliprew = cliprew
self.ret = np.zeros(self.num_envs)
self.gamma = gamma
self.epsilon = epsilon
def step_wait(self):
"""
Apply sequence of actions to sequence of environments
actions -> (observations, rewards, news)
where 'news' is a boolean vector indicating whether each element is new.
"""
obs, rews, news, infos = self.venv.step_wait()
self.ret = self.ret * self.gamma + rews
obs = self._obfilt(obs)
if self.ret_rms:
self.ret_rms.update(self.ret)
rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew)
return obs, rews, news, infos
def _obfilt(self, obs):
if self.ob_rms:
self.ob_rms.update(obs)
obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
return obs
else:
return obs
def reset(self):
"""
Reset all environments
"""
obs = self.venv.reset()
return self._obfilt(obs)
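# Illustrative usage sketch (not part of the original module), assuming gym's
# CartPole-v0 is available and behaves as in the rest of this repo: wrap a
# DummyVecEnv so observations and rewards are normalized on the fly; normalized
# observations stay within [-clipob, clipob].
def _demo_vec_normalize():
    import gym
    from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
    venv = VecNormalize(DummyVecEnv([lambda: gym.make('CartPole-v0')]))
    obs = venv.reset()
    for _ in range(10):
        obs, rews, news, infos = venv.step([venv.action_space.sample()])
    assert np.all(np.abs(obs) <= venv.clipob)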
| 1,910 | 37.22 | 129 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/vec_env/dummy_vec_env.py
|
import numpy as np
from gym import spaces
from collections import OrderedDict
from . import VecEnv
class DummyVecEnv(VecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
shapes, dtypes = {}, {}
self.keys = []
obs_space = env.observation_space
if isinstance(obs_space, spaces.Dict):
assert isinstance(obs_space.spaces, OrderedDict)
subspaces = obs_space.spaces
else:
subspaces = {None: obs_space}
for key, box in subspaces.items():
shapes[key] = box.shape
dtypes[key] = box.dtype
self.keys.append(key)
self.buf_obs = { k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k]) for k in self.keys }
self.buf_dones = np.zeros((self.num_envs,), dtype=np.bool)
self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)
self.buf_infos = [{} for _ in range(self.num_envs)]
self.actions = None
def step_async(self, actions):
listify = True
try:
if len(actions) == self.num_envs:
listify = False
except TypeError:
pass
if not listify:
self.actions = actions
else:
assert self.num_envs == 1, "actions {} is either not a list or has a wrong size - cannot match to {} environments".format(actions, self.num_envs)
self.actions = [actions]
def step_wait(self):
for e in range(self.num_envs):
action = self.actions[e]
if isinstance(self.envs[e].action_space, spaces.Discrete):
action = int(action)
obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e] = self.envs[e].step(action)
if self.buf_dones[e]:
obs = self.envs[e].reset()
self._save_obs(e, obs)
return (np.copy(self._obs_from_buf()), np.copy(self.buf_rews), np.copy(self.buf_dones),
self.buf_infos.copy())
def reset(self):
for e in range(self.num_envs):
obs = self.envs[e].reset()
self._save_obs(e, obs)
return self._obs_from_buf()
def close(self):
return
def render(self, mode='human'):
return [e.render(mode=mode) for e in self.envs]
def _save_obs(self, e, obs):
for k in self.keys:
if k is None:
self.buf_obs[k][e] = obs
else:
self.buf_obs[k][e] = obs[k]
def _obs_from_buf(self):
if self.keys==[None]:
return self.buf_obs[None]
else:
return self.buf_obs
| 2,772 | 32.409639 | 157 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/vec_env/__init__.py
|
from abc import ABC, abstractmethod
from baselines import logger
class AlreadySteppingError(Exception):
"""
Raised when an asynchronous step is running while
step_async() is called again.
"""
def __init__(self):
msg = 'already running an async step'
Exception.__init__(self, msg)
class NotSteppingError(Exception):
"""
Raised when an asynchronous step is not running but
step_wait() is called.
"""
def __init__(self):
msg = 'not running an async step'
Exception.__init__(self, msg)
class VecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
@abstractmethod
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
def render(self, mode='human'):
logger.warn('Render not defined for %s'%self)
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
class VecEnvWrapper(VecEnv):
def __init__(self, venv, observation_space=None, action_space=None):
self.venv = venv
VecEnv.__init__(self,
num_envs=venv.num_envs,
observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space)
def step_async(self, actions):
self.venv.step_async(actions)
@abstractmethod
def reset(self):
pass
@abstractmethod
def step_wait(self):
pass
def close(self):
return self.venv.close()
def render(self):
self.venv.render()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
| 3,392 | 25.716535 | 90 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/vec_env/subproc_vec_env.py
|
import numpy as np
from multiprocessing import Process, Pipe
from baselines.common.vec_env import VecEnv, CloudpickleWrapper
from baselines.common.tile_images import tile_images
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
try:
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'render':
remote.send(env.render(mode='rgb_array'))
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
except KeyboardInterrupt:
print('SubprocVecEnv worker: got KeyboardInterrupt')
finally:
env.close()
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode='human'):
for pipe in self.remotes:
pipe.send(('render', None))
imgs = [pipe.recv() for pipe in self.remotes]
bigimg = tile_images(imgs)
if mode == 'human':
import cv2
cv2.imshow('vecenv', bigimg[:,:,::-1])
cv2.waitKey(1)
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
| 3,553 | 34.188119 | 97 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/common/vec_env/vec_frame_stack.py
|
from baselines.common.vec_env import VecEnvWrapper
import numpy as np
from gym import spaces
class VecFrameStack(VecEnvWrapper):
"""
    A vectorized environment wrapper that stacks the last nstack observations
    along the last axis
"""
def __init__(self, venv, nstack):
self.venv = venv
self.nstack = nstack
wos = venv.observation_space # wrapped ob space
low = np.repeat(wos.low, self.nstack, axis=-1)
high = np.repeat(wos.high, self.nstack, axis=-1)
self.stackedobs = np.zeros((venv.num_envs,)+low.shape, low.dtype)
observation_space = spaces.Box(low=low, high=high, dtype=venv.observation_space.dtype)
VecEnvWrapper.__init__(self, venv, observation_space=observation_space)
def step_wait(self):
obs, rews, news, infos = self.venv.step_wait()
self.stackedobs = np.roll(self.stackedobs, shift=-1, axis=-1)
for (i, new) in enumerate(news):
if new:
self.stackedobs[i] = 0
self.stackedobs[..., -obs.shape[-1]:] = obs
return self.stackedobs, rews, news, infos
def reset(self):
"""
Reset all environments
"""
obs = self.venv.reset()
self.stackedobs[...] = 0
self.stackedobs[..., -obs.shape[-1]:] = obs
return self.stackedobs
def close(self):
self.venv.close()
| 1,319 | 32.846154 | 94 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/ppo2/ppo2.py
|
import os
import time
import functools
import numpy as np
import os.path as osp
import tensorflow as tf
from baselines import logger
from collections import deque
from baselines.common import explained_variance, set_global_seeds
from baselines.common.policies import build_policy
from baselines.common.runners import AbstractEnvRunner
from baselines.common.tf_util import get_session, save_variables, load_variables
from baselines.common.mpi_adam_optimizer import MpiAdamOptimizer
from mpi4py import MPI
from baselines.common.tf_util import initialize
from baselines.common.mpi_util import sync_from_root
from baselines.noisy_reward import PongProcessor, BreakoutProcessor, \
BreakoutProcessor2, AtariProcessor
class Model(object):
def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,
nsteps, ent_coef, vf_coef, max_grad_norm):
sess = get_session()
with tf.variable_scope('ppo2_model', reuse=tf.AUTO_REUSE):
act_model = policy(nbatch_act, 1, sess)
train_model = policy(nbatch_train, nsteps, sess)
A = train_model.pdtype.sample_placeholder([None])
ADV = tf.placeholder(tf.float32, [None])
R = tf.placeholder(tf.float32, [None])
OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])
OLDVPRED = tf.placeholder(tf.float32, [None])
LR = tf.placeholder(tf.float32, [])
CLIPRANGE = tf.placeholder(tf.float32, [])
neglogpac = train_model.pd.neglogp(A)
entropy = tf.reduce_mean(train_model.pd.entropy())
vpred = train_model.vf
vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE)
vf_losses1 = tf.square(vpred - R)
vf_losses2 = tf.square(vpredclipped - R)
vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
ratio = tf.exp(OLDNEGLOGPAC - neglogpac)
pg_losses = -ADV * ratio
pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)
pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC))
clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE)))
loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef
params = tf.trainable_variables('ppo2_model')
trainer = MpiAdamOptimizer(MPI.COMM_WORLD, learning_rate=LR, epsilon=1e-5)
grads_and_var = trainer.compute_gradients(loss, params)
grads, var = zip(*grads_and_var)
if max_grad_norm is not None:
grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
grads_and_var = list(zip(grads, var))
_train = trainer.apply_gradients(grads_and_var)
def train(lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):
advs = returns - values
advs = (advs - advs.mean()) / (advs.std() + 1e-8)
td_map = {train_model.X:obs, A:actions, ADV:advs, R:returns, LR:lr,
CLIPRANGE:cliprange, OLDNEGLOGPAC:neglogpacs, OLDVPRED:values}
if states is not None:
td_map[train_model.S] = states
td_map[train_model.M] = masks
return sess.run(
[pg_loss, vf_loss, entropy, approxkl, clipfrac, _train],
td_map
)[:-1]
self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']
self.train = train
self.train_model = train_model
self.act_model = act_model
self.step = act_model.step
self.value = act_model.value
self.initial_state = act_model.initial_state
self.save = functools.partial(save_variables, sess=sess)
self.load = functools.partial(load_variables, sess=sess)
if MPI.COMM_WORLD.Get_rank() == 0:
initialize()
global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="")
sync_from_root(sess, global_variables) #pylint: disable=E1101
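# Illustrative numpy sketch (not part of the original module) of the clipped
# surrogate objective constructed above: pg_loss takes the pessimistic
# (elementwise maximum) of the unclipped and clipped policy-gradient terms.
def _demo_clipped_pg_loss():
    cliprange = 0.2
    adv = np.array([1.0, -1.0], dtype=np.float32)
    ratio = np.array([1.5, 0.5], dtype=np.float32)  # pi_new / pi_old per sample
    pg_losses = -adv * ratio
    pg_losses2 = -adv * np.clip(ratio, 1.0 - cliprange, 1.0 + cliprange)
    pg_loss = np.mean(np.maximum(pg_losses, pg_losses2))
    # sample 1: max(-1.5, -1.2) = -1.2 ; sample 2: max(0.5, 0.8) = 0.8
    assert np.isclose(pg_loss, np.mean([-1.2, 0.8]))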
class Runner(AbstractEnvRunner):
def __init__(self, *, env, model, nsteps, gamma, lam, weight, normal, surrogate, noise_type, env_name):
super().__init__(env=env, model=model, nsteps=nsteps)
self.lam = lam
self.gamma = gamma
if "Pong" in env_name:
self.processor = PongProcessor(weight=weight, normal=normal, surrogate=surrogate, noise_type=noise_type)
else:
self.processor = AtariProcessor(weight=weight, normal=normal, surrogate=surrogate)
# self.processor = BreakoutProcessor(weight=weight, normal=normal, surrogate=surrogate)
        print(weight, normal, surrogate, noise_type)
# self.processor = AtariProcessor(weight=0.05, normal=False, surrogate=True)
def run(self):
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
mb_states = self.states
epinfos = []
for _ in range(self.nsteps):
actions, values, self.states, neglogpacs = self.model.step(self.obs, S=self.states, M=self.dones)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
self.obs[:], rewards, self.dones, infos = self.env.step(actions)
rewards = self.processor.process_step(rewards)
# print (rewards)
# TODO: surrogate reward
for info in infos:
maybeepinfo = info.get('episode')
if maybeepinfo: epinfos.append(maybeepinfo)
mb_rewards.append(rewards)
#batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.model.value(self.obs, S=self.states, M=self.dones)
#discount/bootstrap off value fn
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.nsteps)):
if t == self.nsteps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)),
mb_states, epinfos)
# obs, returns, masks, actions, values, neglogpacs, states = runner.run()
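# Illustrative numpy sketch (not part of the original module) of the GAE
# recursion used in Runner.run above, simplified to a single environment with
# no terminal states inside the rollout:
def _demo_gae_recursion():
    gamma, lam = 0.99, 0.95
    rewards = np.array([1.0, 0.0, 1.0], dtype=np.float32)
    values = np.array([0.5, 0.4, 0.3], dtype=np.float32)
    last_value = 0.2  # bootstrap value of the state after the last step
    advs = np.zeros_like(rewards)
    lastgaelam = 0.0
    for t in reversed(range(len(rewards))):
        nextvalue = last_value if t == len(rewards) - 1 else values[t + 1]
        delta = rewards[t] + gamma * nextvalue - values[t]
        lastgaelam = delta + gamma * lam * lastgaelam
        advs[t] = lastgaelam
    returns = advs + values
    assert returns.shape == rewards.shape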
def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
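# Illustrative sketch (not part of the original module): sf01 merges the
# (nsteps, nenv) leading axes of a rollout array into a single batch axis.
def _demo_sf01():
    arr = np.arange(24).reshape(2, 3, 4)  # (nsteps=2, nenv=3, feature=4)
    out = sf01(arr)
    assert out.shape == (6, 4)
    assert np.array_equal(out[0], arr[0, 0])
    assert np.array_equal(out[1], arr[1, 0])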
def constfn(val):
def f(_):
return val
return f
def learn(*, network, env, total_timesteps, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
save_interval=0, load_path=None, weight=0.1, normal=True,
surrogate=False, noise_type='norm_one', env_name='PongNoFrameskip-v4', **network_kwargs):
'''
    Learn a policy using the PPO algorithm (https://arxiv.org/abs/1707.06347)
Parameters:
----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See baselines.common/policies.py/lstm for more details on using recurrent nets in policies
env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation.
The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.
nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int number of timesteps (i.e. number of actions taken in the environment)
ent_coef: float policy entropy coefficient in the optimization objective
lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the
training and 0 is the end of the training.
vf_coef: float value function loss coefficient in the optimization objective
max_grad_norm: float or None gradient norm clipping coefficient
gamma: float discounting factor
lam: float advantage estimation discounting factor (lambda in the paper)
log_interval: int number of timesteps between logging events
nminibatches: int number of training minibatches per update
noptepochs: int number of training epochs per update
cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training
and 0 is the end of the training
save_interval: int number of timesteps between saving events
load_path: str path to load the model from
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
set_global_seeds(seed)
if isinstance(lr, float): lr = constfn(lr)
else: assert callable(lr)
if isinstance(cliprange, float): cliprange = constfn(cliprange)
else: assert callable(cliprange)
total_timesteps = int(total_timesteps)
policy = build_policy(env, network, **network_kwargs)
nenvs = env.num_envs
ob_space = env.observation_space
ac_space = env.action_space
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
make_model = lambda : Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm)
if save_interval and logger.get_dir():
import cloudpickle
with open(osp.join(logger.get_dir(), 'make_model.pkl'), 'wb') as fh:
fh.write(cloudpickle.dumps(make_model))
model = make_model()
if load_path is not None:
model.load(load_path)
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma,
lam=lam, weight=weight, normal=normal, surrogate=surrogate,
noise_type=noise_type, env_name=env_name)
epinfobuf = deque(maxlen=100)
tfirststart = time.time()
nupdates = total_timesteps//nbatch
for update in range(1, nupdates+1):
assert nbatch % nminibatches == 0
tstart = time.time()
frac = 1.0 - (update - 1.0) / nupdates
lrnow = lr(frac)
cliprangenow = cliprange(frac)
obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632
epinfobuf.extend(epinfos)
mblossvals = []
if states is None: # nonrecurrent version
inds = np.arange(nbatch)
for _ in range(noptepochs):
np.random.shuffle(inds)
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mblossvals.append(model.train(lrnow, cliprangenow, *slices))
else: # recurrent version
assert nenvs % nminibatches == 0
envsperbatch = nenvs // nminibatches
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
envsperbatch = nbatch_train // nsteps
for _ in range(noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mbstates = states[mbenvinds]
mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))
lossvals = np.mean(mblossvals, axis=0)
tnow = time.time()
fps = int(nbatch / (tnow - tstart))
if update % log_interval == 0 or update == 1:
ev = explained_variance(values, returns)
logger.logkv("serial_timesteps", update*nsteps)
logger.logkv("nupdates", update)
logger.logkv("total_timesteps", update*nbatch)
logger.logkv("fps", fps)
logger.logkv("explained_variance", float(ev))
logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
logger.logkv('time_elapsed', tnow - tfirststart)
for (lossval, lossname) in zip(lossvals, model.loss_names):
logger.logkv(lossname, lossval)
if MPI.COMM_WORLD.Get_rank() == 0:
logger.dumpkvs()
if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir() and MPI.COMM_WORLD.Get_rank() == 0:
checkdir = osp.join(logger.get_dir(), 'checkpoints')
os.makedirs(checkdir, exist_ok=True)
savepath = osp.join(checkdir, '%.5i'%update)
print('Saving to', savepath)
model.save(savepath)
env.close()
return model
def safemean(xs):
return np.nan if len(xs) == 0 else np.mean(xs)
| 15,257 | 46.53271 | 184 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/ppo2/defaults.py
|
def mujoco():
return dict(
nsteps=2048,
nminibatches=32,
lam=0.95,
gamma=0.99,
noptepochs=10,
log_interval=1,
ent_coef=0.0,
lr=lambda f: 3e-4 * f,
cliprange=0.2,
value_network='copy'
)
def atari():
return dict(
nsteps=128, nminibatches=4,
lam=0.95, gamma=0.99, noptepochs=4, log_interval=1,
ent_coef=.01,
lr=lambda f : f * 2.5e-4,
cliprange=lambda f : f * 0.1,
)
| 500 | 20.782609 | 59 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/ppo2/__init__.py
| 0 | 0 | 0 |
py
|
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/a2c/a2c.py
|
import time
import functools
import tensorflow as tf
from baselines import logger
from baselines.common import set_global_seeds, explained_variance
from baselines.common import tf_util
from baselines.common.policies import build_policy
from baselines.a2c.utils import Scheduler, find_trainable_variables
from baselines.a2c.runner import Runner
from tensorflow import losses
class Model(object):
def __init__(self, policy, env, nsteps,
ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4,
alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear'):
sess = tf_util.get_session()
nenvs = env.num_envs
nbatch = nenvs*nsteps
with tf.variable_scope('a2c_model', reuse=tf.AUTO_REUSE):
step_model = policy(nenvs, 1, sess)
train_model = policy(nbatch, nsteps, sess)
A = tf.placeholder(train_model.action.dtype, train_model.action.shape)
ADV = tf.placeholder(tf.float32, [nbatch])
R = tf.placeholder(tf.float32, [nbatch])
LR = tf.placeholder(tf.float32, [])
neglogpac = train_model.pd.neglogp(A)
entropy = tf.reduce_mean(train_model.pd.entropy())
pg_loss = tf.reduce_mean(ADV * neglogpac)
vf_loss = losses.mean_squared_error(tf.squeeze(train_model.vf), R)
loss = pg_loss - entropy*ent_coef + vf_loss * vf_coef
params = find_trainable_variables("a2c_model")
grads = tf.gradients(loss, params)
if max_grad_norm is not None:
grads, grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
grads = list(zip(grads, params))
trainer = tf.train.RMSPropOptimizer(learning_rate=LR, decay=alpha, epsilon=epsilon)
_train = trainer.apply_gradients(grads)
lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)
def train(obs, states, rewards, masks, actions, values):
advs = rewards - values
for step in range(len(obs)):
cur_lr = lr.value()
td_map = {train_model.X:obs, A:actions, ADV:advs, R:rewards, LR:cur_lr}
if states is not None:
td_map[train_model.S] = states
td_map[train_model.M] = masks
policy_loss, value_loss, policy_entropy, _ = sess.run(
[pg_loss, vf_loss, entropy, _train],
td_map
)
return policy_loss, value_loss, policy_entropy
self.train = train
self.train_model = train_model
self.step_model = step_model
self.step = step_model.step
self.value = step_model.value
self.initial_state = step_model.initial_state
self.save = functools.partial(tf_util.save_variables, sess=sess)
self.load = functools.partial(tf_util.load_variables, sess=sess)
tf.global_variables_initializer().run(session=sess)
def learn(
network,
env,
seed=None,
nsteps=5,
total_timesteps=int(80e6),
vf_coef=0.5,
ent_coef=0.01,
max_grad_norm=0.5,
lr=7e-4,
lrschedule='linear',
epsilon=1e-5,
alpha=0.99,
gamma=0.99,
log_interval=100,
load_path=None,
**network_kwargs):
'''
    Main entrypoint for the A2C algorithm. Train a policy with a given network architecture on a given environment using A2C.
Parameters:
-----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See baselines.common/policies.py/lstm for more details on using recurrent nets in policies
env: RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py)
    seed: seed to make the random number sequence in the algorithm reproducible. Defaults to None, which means the seed is taken from the system noise generator (not reproducible)
nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int, total number of timesteps to train on (default: 80M)
vf_coef: float, coefficient in front of value function loss in the total loss function (default: 0.5)
    ent_coef: float, coefficient in front of the policy entropy in the total loss function (default: 0.01)
    max_grad_norm: float, gradient is clipped to have global L2 norm no more than this value (default: 0.5)
lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4)
lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and
returns fraction of the learning rate (specified as lr) as output
epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5)
alpha: float, RMSProp decay parameter (default: 0.99)
gamma: float, reward discounting parameter (default: 0.99)
log_interval: int, specifies how frequently the logs are printed out (default: 100)
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
set_global_seeds(seed)
nenvs = env.num_envs
policy = build_policy(env, network, **network_kwargs)
model = Model(policy=policy, env=env, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule)
if load_path is not None:
model.load(load_path)
runner = Runner(env, model, nsteps=nsteps, gamma=gamma)
nbatch = nenvs*nsteps
tstart = time.time()
for update in range(1, total_timesteps//nbatch+1):
obs, states, rewards, masks, actions, values = runner.run()
policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values)
nseconds = time.time()-tstart
fps = int((update*nbatch)/nseconds)
if update % log_interval == 0 or update == 1:
ev = explained_variance(values, rewards)
logger.record_tabular("nupdates", update)
logger.record_tabular("total_timesteps", update*nbatch)
logger.record_tabular("fps", fps)
logger.record_tabular("policy_entropy", float(policy_entropy))
logger.record_tabular("value_loss", float(value_loss))
logger.record_tabular("explained_variance", float(ev))
logger.dump_tabular()
env.close()
return model
| 7,571 | 41.301676 | 186 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/a2c/utils.py
|
import os
import numpy as np
import tensorflow as tf
from collections import deque
def sample(logits):
noise = tf.random_uniform(tf.shape(logits))
return tf.argmax(logits - tf.log(-tf.log(noise)), 1)
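# sample() implements the Gumbel-max trick: adding -log(-log(U)) noise, U ~ Uniform(0, 1),
# to the logits and taking the argmax is equivalent to drawing a sample from
# softmax(logits), so no explicit normalization is needed.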
def cat_entropy(logits):
a0 = logits - tf.reduce_max(logits, 1, keepdims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, 1, keepdims=True)
p0 = ea0 / z0
return tf.reduce_sum(p0 * (tf.log(z0) - a0), 1)
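# cat_entropy computes the entropy of the categorical distribution directly from logits in
# a numerically stable way: with a_i the max-shifted logits and Z = sum_i exp(a_i),
# H = sum_i p_i * (log Z - a_i), which equals -sum_i p_i * log p_i.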
def cat_entropy_softmax(p0):
return - tf.reduce_sum(p0 * tf.log(p0 + 1e-6), axis = 1)
def ortho_init(scale=1.0):
def _ortho_init(shape, dtype, partition_info=None):
#lasagne ortho init for tf
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init
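# ortho_init builds (scaled) orthogonal weight initializers via an SVD of a random Gaussian
# matrix; orthogonal weights help keep activations and gradients well-conditioned early in training.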
def conv(x, scope, *, nf, rf, stride, pad='VALID', init_scale=1.0, data_format='NHWC', one_dim_bias=False):
if data_format == 'NHWC':
channel_ax = 3
strides = [1, stride, stride, 1]
bshape = [1, 1, 1, nf]
elif data_format == 'NCHW':
channel_ax = 1
strides = [1, 1, stride, stride]
bshape = [1, nf, 1, 1]
else:
raise NotImplementedError
bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1]
nin = x.get_shape()[channel_ax].value
wshape = [rf, rf, nin, nf]
with tf.variable_scope(scope):
w = tf.get_variable("w", wshape, initializer=ortho_init(init_scale))
b = tf.get_variable("b", bias_var_shape, initializer=tf.constant_initializer(0.0))
if not one_dim_bias and data_format == 'NHWC':
b = tf.reshape(b, bshape)
return tf.nn.conv2d(x, w, strides=strides, padding=pad, data_format=data_format) + b
def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0):
with tf.variable_scope(scope):
nin = x.get_shape()[1].value
w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias))
return tf.matmul(x, w)+b
def batch_to_seq(h, nbatch, nsteps, flat=False):
if flat:
h = tf.reshape(h, [nbatch, nsteps])
else:
h = tf.reshape(h, [nbatch, nsteps, -1])
return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps, value=h)]
def seq_to_batch(h, flat = False):
shape = h[0].get_shape().as_list()
if not flat:
assert(len(shape) > 1)
nh = h[0].get_shape()[-1].value
return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])
else:
return tf.reshape(tf.stack(values=h, axis=1), [-1])
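# batch_to_seq/seq_to_batch convert between a flat [nbatch*nsteps, ...] tensor and a
# per-timestep list of [nbatch, ...] tensors, which is the layout expected by the
# lstm()/lnlstm() cells defined below.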
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(c)
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
def _ln(x, g, b, e=1e-5, axes=[1]):
u, s = tf.nn.moments(x, axes=axes, keep_dims=True)
x = (x-u)/tf.sqrt(s+e)
x = x*g+b
return x
def lnlstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(_ln(c, gc, bc))
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
def conv_to_fc(x):
nh = np.prod([v.value for v in x.get_shape()[1:]])
x = tf.reshape(x, [-1, nh])
return x
def discount_with_dones(rewards, dones, gamma):
discounted = []
r = 0
for reward, done in zip(rewards[::-1], dones[::-1]):
r = reward + gamma*r*(1.-done) # fixed off by one bug
discounted.append(r)
return discounted[::-1]
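# Quick worked example (a sanity check, not from the original source):
#   discount_with_dones([1.0, 1.0, 1.0], [0, 0, 1], 0.5) -> [1.75, 1.5, 1.0]
# The done flag stops the discounted return from being carried across episode boundaries.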
def find_trainable_variables(key):
return tf.trainable_variables(key)
def make_path(f):
return os.makedirs(f, exist_ok=True)
def constant(p):
return 1
def linear(p):
return 1-p
def middle_drop(p):
eps = 0.75
if 1-p<eps:
return eps*0.1
return 1-p
def double_linear_con(p):
p *= 2
eps = 0.125
if 1-p<eps:
return eps
return 1-p
def double_middle_drop(p):
eps1 = 0.75
eps2 = 0.25
if 1-p<eps1:
if 1-p<eps2:
return eps2*0.5
return eps1*0.1
return 1-p
schedules = {
'linear':linear,
'constant':constant,
'double_linear_con': double_linear_con,
'middle_drop': middle_drop,
'double_middle_drop': double_middle_drop
}
class Scheduler(object):
def __init__(self, v, nvalues, schedule):
self.n = 0.
self.v = v
self.nvalues = nvalues
self.schedule = schedules[schedule]
def value(self):
current_value = self.v*self.schedule(self.n/self.nvalues)
self.n += 1.
return current_value
def value_steps(self, steps):
return self.v*self.schedule(steps/self.nvalues)
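# Example with hypothetical values: Scheduler(v=7e-4, nvalues=80e6, schedule='linear')
# returns 7e-4 on the first call to value() and decays linearly towards 0 as the internal
# counter approaches nvalues; value_steps(n) computes the same quantity for an explicit
# step count without advancing the counter.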
class EpisodeStats:
def __init__(self, nsteps, nenvs):
self.episode_rewards = []
for i in range(nenvs):
self.episode_rewards.append([])
self.lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths
self.rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards
self.nsteps = nsteps
self.nenvs = nenvs
def feed(self, rewards, masks):
rewards = np.reshape(rewards, [self.nenvs, self.nsteps])
masks = np.reshape(masks, [self.nenvs, self.nsteps])
for i in range(0, self.nenvs):
for j in range(0, self.nsteps):
self.episode_rewards[i].append(rewards[i][j])
if masks[i][j]:
l = len(self.episode_rewards[i])
s = sum(self.episode_rewards[i])
self.lenbuffer.append(l)
self.rewbuffer.append(s)
self.episode_rewards[i] = []
def mean_length(self):
if self.lenbuffer:
return np.mean(self.lenbuffer)
else:
return 0 # on the first params dump, no episodes are finished
def mean_reward(self):
if self.rewbuffer:
return np.mean(self.rewbuffer)
else:
return 0
# For ACER
def get_by_index(x, idx):
assert(len(x.get_shape()) == 2)
assert(len(idx.get_shape()) == 1)
idx_flattened = tf.range(0, x.shape[0]) * x.shape[1] + idx
y = tf.gather(tf.reshape(x, [-1]), # flatten input
idx_flattened) # use flattened indices
return y
def check_shape(ts,shapes):
i = 0
for (t,shape) in zip(ts,shapes):
assert t.get_shape().as_list()==shape, "id " + str(i) + " shape " + str(t.get_shape()) + str(shape)
i += 1
def avg_norm(t):
return tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(t), axis=-1)))
def gradient_add(g1, g2, param):
print([g1, g2, param.name])
assert (not (g1 is None and g2 is None)), param.name
if g1 is None:
return g2
elif g2 is None:
return g1
else:
return g1 + g2
def q_explained_variance(qpred, q):
_, vary = tf.nn.moments(q, axes=[0, 1])
_, varpred = tf.nn.moments(q - qpred, axes=[0, 1])
check_shape([vary, varpred], [[]] * 2)
return 1.0 - (varpred / vary)
| 9,348 | 32.035336 | 107 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/a2c/runner.py
|
import numpy as np
from baselines.a2c.utils import discount_with_dones
from baselines.common.runners import AbstractEnvRunner
class Runner(AbstractEnvRunner):
def __init__(self, env, model, nsteps=5, gamma=0.99):
super().__init__(env=env, model=model, nsteps=nsteps)
self.gamma = gamma
self.batch_action_shape = [x if x is not None else -1 for x in model.train_model.action.shape.as_list()]
self.ob_dtype = model.train_model.X.dtype.as_numpy_dtype
def run(self):
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[]
mb_states = self.states
for n in range(self.nsteps):
actions, values, states, _ = self.model.step(self.obs, S=self.states, M=self.dones)
mb_obs.append(np.copy(self.obs))
mb_actions.append(actions)
mb_values.append(values)
mb_dones.append(self.dones)
obs, rewards, dones, _ = self.env.step(actions)
# TODO: surrogate reward
self.states = states
self.dones = dones
for n, done in enumerate(dones):
if done:
self.obs[n] = self.obs[n]*0
self.obs = obs
mb_rewards.append(rewards)
mb_dones.append(self.dones)
#batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.ob_dtype).swapaxes(1, 0).reshape(self.batch_ob_shape)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
mb_actions = np.asarray(mb_actions, dtype=self.model.train_model.action.dtype.name).swapaxes(1, 0)
mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0)
mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)
mb_masks = mb_dones[:, :-1]
mb_dones = mb_dones[:, 1:]
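        # mb_masks flags whether each state was the start of a new episode (the done from
        # the previous step), while mb_dones flags termination at the current step.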
if self.gamma > 0.0:
#discount/bootstrap off value fn
last_values = self.model.value(self.obs, S=self.states, M=self.dones).tolist()
for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
rewards = rewards.tolist()
dones = dones.tolist()
if dones[-1] == 0:
rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1]
else:
rewards = discount_with_dones(rewards, dones, self.gamma)
mb_rewards[n] = rewards
mb_actions = mb_actions.reshape(self.batch_action_shape)
mb_rewards = mb_rewards.flatten()
mb_values = mb_values.flatten()
mb_masks = mb_masks.flatten()
return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values
| 2,727 | 43 | 112 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/a2c/__init__.py
| 0 | 0 | 0 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/bench/benchmarks.py
|
import re
import os.path as osp
import os
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
_atari7 = ['BeamRider', 'Breakout', 'Enduro', 'Pong', 'Qbert', 'Seaquest', 'SpaceInvaders']
_atariexpl7 = ['Freeway', 'Gravitar', 'MontezumaRevenge', 'Pitfall', 'PrivateEye', 'Solaris', 'Venture']
_BENCHMARKS = []
remove_version_re = re.compile(r'-v\d+$')
def register_benchmark(benchmark):
for b in _BENCHMARKS:
if b['name'] == benchmark['name']:
raise ValueError('Benchmark with name %s already registered!' % b['name'])
# automatically add a description if it is not present
if 'tasks' in benchmark:
for t in benchmark['tasks']:
if 'desc' not in t:
t['desc'] = remove_version_re.sub('', t['env_id'])
_BENCHMARKS.append(benchmark)
def list_benchmarks():
return [b['name'] for b in _BENCHMARKS]
def get_benchmark(benchmark_name):
for b in _BENCHMARKS:
if b['name'] == benchmark_name:
return b
raise ValueError('%s not found! Known benchmarks: %s' % (benchmark_name, list_benchmarks()))
def get_task(benchmark, env_id):
"""Get a task by env_id. Return None if the benchmark doesn't have the env"""
return next(filter(lambda task: task['env_id'] == env_id, benchmark['tasks']), None)
def find_task_for_env_id_in_any_benchmark(env_id):
for bm in _BENCHMARKS:
for task in bm["tasks"]:
if task["env_id"] == env_id:
return bm, task
return None, None
_ATARI_SUFFIX = 'NoFrameskip-v4'
register_benchmark({
'name': 'Atari50M',
'description': '7 Atari games from Mnih et al. (2013), with pixel observations, 50M timesteps',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 2, 'num_timesteps': int(50e6)} for _game in _atari7]
})
register_benchmark({
'name': 'Atari10M',
'description': '7 Atari games from Mnih et al. (2013), with pixel observations, 10M timesteps',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 6, 'num_timesteps': int(10e6)} for _game in _atari7]
})
register_benchmark({
'name': 'Atari1Hr',
'description': '7 Atari games from Mnih et al. (2013), with pixel observations, 1 hour of walltime',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 2, 'num_seconds': 60 * 60} for _game in _atari7]
})
register_benchmark({
'name': 'AtariExploration10M',
'description': '7 Atari games emphasizing exploration, with pixel observations, 10M timesteps',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 2, 'num_timesteps': int(10e6)} for _game in _atariexpl7]
})
# MuJoCo
_mujocosmall = [
'InvertedDoublePendulum-v2', 'InvertedPendulum-v2',
'HalfCheetah-v2', 'Hopper-v2', 'Walker2d-v2',
'Reacher-v2', 'Swimmer-v2']
register_benchmark({
'name': 'Mujoco1M',
'description': 'Some small 2D MuJoCo tasks, run for 1M timesteps',
'tasks': [{'env_id': _envid, 'trials': 6, 'num_timesteps': int(1e6)} for _envid in _mujocosmall]
})
register_benchmark({
'name': 'MujocoWalkers',
'description': 'MuJoCo forward walkers, run for 8M, humanoid 100M',
'tasks': [
{'env_id': "Hopper-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "Walker2d-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "Humanoid-v1", 'trials': 4, 'num_timesteps': 100 * 1000000},
]
})
# Roboschool
register_benchmark({
'name': 'Roboschool8M',
'description': 'Small 2D tasks, up to 30 minutes to complete on 8 cores',
'tasks': [
{'env_id': "RoboschoolReacher-v1", 'trials': 4, 'num_timesteps': 2 * 1000000},
{'env_id': "RoboschoolAnt-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "RoboschoolHalfCheetah-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "RoboschoolHopper-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "RoboschoolWalker2d-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
]
})
register_benchmark({
'name': 'RoboschoolHarder',
'description': 'Test your might!!! Up to 12 hours on 32 cores',
'tasks': [
{'env_id': "RoboschoolHumanoid-v1", 'trials': 4, 'num_timesteps': 100 * 1000000},
{'env_id': "RoboschoolHumanoidFlagrun-v1", 'trials': 4, 'num_timesteps': 200 * 1000000},
{'env_id': "RoboschoolHumanoidFlagrunHarder-v1", 'trials': 4, 'num_timesteps': 400 * 1000000},
]
})
# Other
_atari50 = [ # actually 47
'Alien', 'Amidar', 'Assault', 'Asterix', 'Asteroids',
'Atlantis', 'BankHeist', 'BattleZone', 'BeamRider', 'Bowling',
'Breakout', 'Centipede', 'ChopperCommand', 'CrazyClimber',
'DemonAttack', 'DoubleDunk', 'Enduro', 'FishingDerby', 'Freeway',
'Frostbite', 'Gopher', 'Gravitar', 'IceHockey', 'Jamesbond',
'Kangaroo', 'Krull', 'KungFuMaster', 'MontezumaRevenge', 'MsPacman',
'NameThisGame', 'Pitfall', 'Pong', 'PrivateEye', 'Qbert',
'RoadRunner', 'Robotank', 'Seaquest', 'SpaceInvaders', 'StarGunner',
'Tennis', 'TimePilot', 'Tutankham', 'UpNDown', 'Venture',
'VideoPinball', 'WizardOfWor', 'Zaxxon',
]
register_benchmark({
'name': 'Atari50_10M',
'description': '47 Atari games from Mnih et al. (2013), with pixel observations, 10M timesteps',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 2, 'num_timesteps': int(10e6)} for _game in _atari50]
})
# HER DDPG
register_benchmark({
'name': 'HerDdpg',
'description': 'Smoke-test only benchmark of HER',
'tasks': [{'trials': 1, 'env_id': 'FetchReach-v1'}]
})
| 5,598 | 35.835526 | 129 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/bench/monitor.py
|
__all__ = ['Monitor', 'get_monitor_files', 'load_results']
import gym
from gym.core import Wrapper
import time
from glob import glob
import csv
import os.path as osp
import json
import numpy as np
class Monitor(Wrapper):
EXT = "monitor.csv"
f = None
def __init__(self, env, filename, allow_early_resets=False, reset_keywords=(), info_keywords=()):
Wrapper.__init__(self, env=env)
self.tstart = time.time()
if filename is None:
self.f = None
self.logger = None
else:
if not filename.endswith(Monitor.EXT):
if osp.isdir(filename):
filename = osp.join(filename, Monitor.EXT)
else:
filename = filename + "." + Monitor.EXT
self.f = open(filename, "wt")
self.f.write('#%s\n'%json.dumps({"t_start": self.tstart, 'env_id' : env.spec and env.spec.id}))
self.logger = csv.DictWriter(self.f, fieldnames=('r', 'l', 't')+reset_keywords+info_keywords)
self.logger.writeheader()
self.f.flush()
self.reset_keywords = reset_keywords
self.info_keywords = info_keywords
self.allow_early_resets = allow_early_resets
self.rewards = None
self.needs_reset = True
self.episode_rewards = []
self.episode_lengths = []
self.episode_times = []
self.total_steps = 0
self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()
def reset(self, **kwargs):
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError("Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)")
self.rewards = []
self.needs_reset = False
for k in self.reset_keywords:
v = kwargs.get(k)
if v is None:
raise ValueError('Expected you to pass kwarg %s into reset'%k)
self.current_reset_info[k] = v
return self.env.reset(**kwargs)
def step(self, action):
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
ob, rew, done, info = self.env.step(action)
self.rewards.append(rew)
if done:
self.needs_reset = True
eprew = sum(self.rewards)
eplen = len(self.rewards)
epinfo = {"r": round(eprew, 6), "l": eplen, "t": round(time.time() - self.tstart, 6)}
for k in self.info_keywords:
epinfo[k] = info[k]
self.episode_rewards.append(eprew)
self.episode_lengths.append(eplen)
self.episode_times.append(time.time() - self.tstart)
epinfo.update(self.current_reset_info)
if self.logger:
self.logger.writerow(epinfo)
self.f.flush()
info['episode'] = epinfo
self.total_steps += 1
return (ob, rew, done, info)
def close(self):
if self.f is not None:
self.f.close()
def get_total_steps(self):
return self.total_steps
def get_episode_rewards(self):
return self.episode_rewards
def get_episode_lengths(self):
return self.episode_lengths
def get_episode_times(self):
return self.episode_times
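# Typical use (hypothetical path): Monitor(gym.make("CartPole-v1"), "/tmp/run0") appends
# one CSV row per finished episode with its reward 'r', length 'l' and wall-clock time 't'.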
class LoadMonitorResultsError(Exception):
pass
def get_monitor_files(dir):
return glob(osp.join(dir, "*" + Monitor.EXT))
def load_results(dir):
import pandas
monitor_files = (
glob(osp.join(dir, "*monitor.json")) +
glob(osp.join(dir, "*monitor.csv"))) # get both csv and (old) json files
if not monitor_files:
raise LoadMonitorResultsError("no monitor files of the form *%s found in %s" % (Monitor.EXT, dir))
dfs = []
headers = []
for fname in monitor_files:
with open(fname, 'rt') as fh:
if fname.endswith('csv'):
firstline = fh.readline()
if not firstline:
continue
assert firstline[0] == '#'
header = json.loads(firstline[1:])
df = pandas.read_csv(fh, index_col=None)
headers.append(header)
elif fname.endswith('json'): # Deprecated json format
episodes = []
lines = fh.readlines()
header = json.loads(lines[0])
headers.append(header)
for line in lines[1:]:
episode = json.loads(line)
episodes.append(episode)
df = pandas.DataFrame(episodes)
else:
assert 0, 'unreachable'
df['t'] += header['t_start']
dfs.append(df)
df = pandas.concat(dfs)
df.sort_values('t', inplace=True)
df.reset_index(inplace=True)
df['t'] -= min(header['t_start'] for header in headers)
df.headers = headers # HACK to preserve backwards compatibility
return df
def test_monitor():
    # uuid, os and pandas are used below but not imported at module level,
    # so import them here to keep this smoke test self-contained.
    import uuid
    import os
    import pandas
    env = gym.make("CartPole-v1")
env.seed(0)
mon_file = "/tmp/baselines-test-%s.monitor.csv" % uuid.uuid4()
menv = Monitor(env, mon_file)
menv.reset()
for _ in range(1000):
_, _, done, _ = menv.step(0)
if done:
menv.reset()
f = open(mon_file, 'rt')
firstline = f.readline()
assert firstline.startswith('#')
metadata = json.loads(firstline[1:])
assert metadata['env_id'] == "CartPole-v1"
assert set(metadata.keys()) == {'env_id', 'gym_version', 't_start'}, "Incorrect keys in monitor metadata"
last_logline = pandas.read_csv(f, index_col=None)
assert set(last_logline.keys()) == {'l', 't', 'r'}, "Incorrect keys in monitor logline"
f.close()
os.remove(mon_file)
| 5,848 | 34.664634 | 174 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/baselines/baselines/bench/__init__.py
|
from baselines.bench.benchmarks import *
from baselines.bench.monitor import *
| 78 | 38.5 | 40 |
py
|
rl-perturbed-reward
|
rl-perturbed-reward-master/gym-atari/scripts/visualize.py
|
import argparse
import os
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir', type=str, default='baselines/logs',
                    help='The path of log directory [default: baselines/logs]')
parser.add_argument('--all', type=str2bool, default=False,
help='Plot all the curves (diff errs) [default: False]')
parser.add_argument('--weight', type=float, default=0.2,
help='Weight of noise [default: 0.2]')
parser.add_argument('--noise_type', type=str, default='anti_iden',
help='Type of additional noise [default: anti_iden]')
parser.add_argument('--save_dir', type=str, default='../results',
help='Path of root directory to save plots [default: save_dir]')
parser.add_argument('--env_name', type=str, default='Pong',
help='Name of Atari game')
parser.add_argument('--num_timesteps', type=int, default=5e7,
help='Number of timesteps')
FLAGS = parser.parse_args()
LOG_DIR = FLAGS.log_dir
ALL = FLAGS.all
WEIGHT = FLAGS.weight
NOISE_TYPE = FLAGS.noise_type
SAVE_DIR = FLAGS.save_dir
ENV = FLAGS.env_name
NUM_TIMESTEPS = FLAGS.num_timesteps
assert (os.path.exists(LOG_DIR))
assert (NOISE_TYPE in ['norm_one', 'norm_all', 'anti_iden'])
SAVE_DIR = os.path.join(SAVE_DIR, ENV)
if not os.path.exists(SAVE_DIR):
os.makedirs(SAVE_DIR)
def visualize():
if ALL:
weights_list = [0.1, 0.2, 0.3, 0.4,
0.6, 0.7, 0.8, 0.9]
if NOISE_TYPE != "anti_iden":
weights_list.append(0.5)
else:
weights_list = [WEIGHT]
for weight in weights_list:
        cmd = ("python -m baselines.results_compare --log_dir %s --task_name %s "
               "--weight %s --noise_type %s --num_timesteps %s --save_dir %s" %
               (LOG_DIR, ENV, str(weight), NOISE_TYPE, str(NUM_TIMESTEPS), SAVE_DIR))
        print(cmd)
        os.system(cmd)
        print(LOG_DIR, ENV, str(weight), NOISE_TYPE, str(NUM_TIMESTEPS), SAVE_DIR)
#os.system("cd ..")
if __name__ == "__main__":
visualize()
| 2,552 | 38.276923 | 88 |
py
|
text-classification-cnn-rnn
|
text-classification-cnn-rnn-master/rnn_model.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import tensorflow as tf
class TRNNConfig(object):
"""RNN配置参数"""
# 模型参数
embedding_dim = 64 # 词向量维度
seq_length = 600 # 序列长度
num_classes = 10 # 类别数
vocab_size = 5000 # 词汇表达小
num_layers= 2 # 隐藏层层数
hidden_dim = 128 # 隐藏层神经元
rnn = 'gru' # lstm 或 gru
dropout_keep_prob = 0.8 # dropout保留比例
learning_rate = 1e-3 # 学习率
batch_size = 128 # 每批训练大小
num_epochs = 10 # 总迭代轮次
print_per_batch = 100 # 每多少轮输出一次结果
save_per_batch = 10 # 每多少轮存入tensorboard
class TextRNN(object):
"""文本分类,RNN模型"""
def __init__(self, config):
self.config = config
        # Three input placeholders
self.input_x = tf.placeholder(tf.int32, [None, self.config.seq_length], name='input_x')
self.input_y = tf.placeholder(tf.float32, [None, self.config.num_classes], name='input_y')
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
self.rnn()
def rnn(self):
"""rnn模型"""
def lstm_cell(): # lstm核
return tf.contrib.rnn.BasicLSTMCell(self.config.hidden_dim, state_is_tuple=True)
def gru_cell(): # gru核
return tf.contrib.rnn.GRUCell(self.config.hidden_dim)
def dropout(): # 为每一个rnn核后面加一个dropout层
if (self.config.rnn == 'lstm'):
cell = lstm_cell()
else:
cell = gru_cell()
return tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=self.keep_prob)
        # Word embedding lookup
with tf.device('/cpu:0'):
embedding = tf.get_variable('embedding', [self.config.vocab_size, self.config.embedding_dim])
embedding_inputs = tf.nn.embedding_lookup(embedding, self.input_x)
with tf.name_scope("rnn"):
            # Multi-layer RNN network
cells = [dropout() for _ in range(self.config.num_layers)]
rnn_cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
_outputs, _ = tf.nn.dynamic_rnn(cell=rnn_cell, inputs=embedding_inputs, dtype=tf.float32)
            last = _outputs[:, -1, :]  # take the output of the last time step as the result
        with tf.name_scope("score"):
            # Fully connected layer, followed by dropout and ReLU activation
            fc = tf.layers.dense(last, self.config.hidden_dim, name='fc1')
            fc = tf.contrib.layers.dropout(fc, self.keep_prob)
            fc = tf.nn.relu(fc)
            # Classifier
            self.logits = tf.layers.dense(fc, self.config.num_classes, name='fc2')
            self.y_pred_cls = tf.argmax(tf.nn.softmax(self.logits), 1)  # predicted class
        with tf.name_scope("optimize"):
            # Loss function: cross entropy
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.input_y)
            self.loss = tf.reduce_mean(cross_entropy)
            # Optimizer
            self.optim = tf.train.AdamOptimizer(learning_rate=self.config.learning_rate).minimize(self.loss)
        with tf.name_scope("accuracy"):
            # Accuracy
correct_pred = tf.equal(tf.argmax(self.input_y, 1), self.y_pred_cls)
self.acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
| 3,168 | 33.824176 | 108 |
py
|
text-classification-cnn-rnn
|
text-classification-cnn-rnn-master/cnn_model.py
|
# coding: utf-8
import tensorflow as tf
class TCNNConfig(object):
"""CNN配置参数"""
embedding_dim = 64 # 词向量维度
seq_length = 600 # 序列长度
num_classes = 10 # 类别数
num_filters = 256 # 卷积核数目
kernel_size = 5 # 卷积核尺寸
vocab_size = 5000 # 词汇表达小
hidden_dim = 128 # 全连接层神经元
dropout_keep_prob = 0.5 # dropout保留比例
learning_rate = 1e-3 # 学习率
batch_size = 64 # 每批训练大小
num_epochs = 10 # 总迭代轮次
print_per_batch = 100 # 每多少轮输出一次结果
save_per_batch = 10 # 每多少轮存入tensorboard
class TextCNN(object):
"""文本分类,CNN模型"""
def __init__(self, config):
self.config = config
        # Three input placeholders
self.input_x = tf.placeholder(tf.int32, [None, self.config.seq_length], name='input_x')
self.input_y = tf.placeholder(tf.float32, [None, self.config.num_classes], name='input_y')
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
self.cnn()
def cnn(self):
"""CNN模型"""
# 词向量映射
with tf.device('/cpu:0'):
embedding = tf.get_variable('embedding', [self.config.vocab_size, self.config.embedding_dim])
embedding_inputs = tf.nn.embedding_lookup(embedding, self.input_x)
with tf.name_scope("cnn"):
# CNN layer
conv = tf.layers.conv1d(embedding_inputs, self.config.num_filters, self.config.kernel_size, name='conv')
# global max pooling layer
gmp = tf.reduce_max(conv, reduction_indices=[1], name='gmp')
with tf.name_scope("score"):
            # Fully connected layer, followed by dropout and ReLU activation
            fc = tf.layers.dense(gmp, self.config.hidden_dim, name='fc1')
            fc = tf.contrib.layers.dropout(fc, self.keep_prob)
            fc = tf.nn.relu(fc)
            # Classifier
            self.logits = tf.layers.dense(fc, self.config.num_classes, name='fc2')
            self.y_pred_cls = tf.argmax(tf.nn.softmax(self.logits), 1)  # predicted class
        with tf.name_scope("optimize"):
            # Loss function: cross entropy
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.input_y)
            self.loss = tf.reduce_mean(cross_entropy)
            # Optimizer
            self.optim = tf.train.AdamOptimizer(learning_rate=self.config.learning_rate).minimize(self.loss)
        with tf.name_scope("accuracy"):
            # Accuracy
correct_pred = tf.equal(tf.argmax(self.input_y, 1), self.y_pred_cls)
self.acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
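# Architecture sketch (as built above): token ids -> embedding(64) -> conv1d(256 filters,
# kernel 5) -> global max pooling -> dense(128) + dropout + ReLU -> dense(num_classes),
# with the argmax over the softmax giving the predicted label.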
| 2,493 | 32.253333 | 116 |
py
|
text-classification-cnn-rnn
|
text-classification-cnn-rnn-master/run_cnn.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import time
from datetime import timedelta
import numpy as np
import tensorflow as tf
from sklearn import metrics
from cnn_model import TCNNConfig, TextCNN
from data.cnews_loader import read_vocab, read_category, batch_iter, process_file, build_vocab
base_dir = 'data/cnews'
train_dir = os.path.join(base_dir, 'cnews.train.txt')
test_dir = os.path.join(base_dir, 'cnews.test.txt')
val_dir = os.path.join(base_dir, 'cnews.val.txt')
vocab_dir = os.path.join(base_dir, 'cnews.vocab.txt')
save_dir = 'checkpoints/textcnn'
save_path = os.path.join(save_dir, 'best_validation')  # path for saving the best validation checkpoint
def get_time_dif(start_time):
"""获取已使用时间"""
end_time = time.time()
time_dif = end_time - start_time
return timedelta(seconds=int(round(time_dif)))
def feed_data(x_batch, y_batch, keep_prob):
feed_dict = {
model.input_x: x_batch,
model.input_y: y_batch,
model.keep_prob: keep_prob
}
return feed_dict
def evaluate(sess, x_, y_):
"""评估在某一数据上的准确率和损失"""
data_len = len(x_)
batch_eval = batch_iter(x_, y_, 128)
total_loss = 0.0
total_acc = 0.0
for x_batch, y_batch in batch_eval:
batch_len = len(x_batch)
feed_dict = feed_data(x_batch, y_batch, 1.0)
loss, acc = sess.run([model.loss, model.acc], feed_dict=feed_dict)
total_loss += loss * batch_len
total_acc += acc * batch_len
return total_loss / data_len, total_acc / data_len
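# Note: evaluate() weights each batch by its size, so the returned loss and accuracy are
# exact averages over the whole dataset even when the final batch is smaller than 128.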
def train():
print("Configuring TensorBoard and Saver...")
    # Configure TensorBoard; when retraining, delete the tensorboard folder first, otherwise the new graphs are written on top of the old ones
tensorboard_dir = 'tensorboard/textcnn'
if not os.path.exists(tensorboard_dir):
os.makedirs(tensorboard_dir)
tf.summary.scalar("loss", model.loss)
tf.summary.scalar("accuracy", model.acc)
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(tensorboard_dir)
    # Configure Saver
saver = tf.train.Saver()
if not os.path.exists(save_dir):
os.makedirs(save_dir)
print("Loading training and validation data...")
    # Load the training and validation sets
start_time = time.time()
x_train, y_train = process_file(train_dir, word_to_id, cat_to_id, config.seq_length)
x_val, y_val = process_file(val_dir, word_to_id, cat_to_id, config.seq_length)
time_dif = get_time_dif(start_time)
print("Time usage:", time_dif)
    # Create session
session = tf.Session()
session.run(tf.global_variables_initializer())
writer.add_graph(session.graph)
print('Training and evaluating...')
start_time = time.time()
    total_batch = 0              # total number of batches processed
    best_acc_val = 0.0           # best validation accuracy so far
    last_improved = 0            # batch index of the last improvement
    require_improvement = 1000   # stop training early if there is no improvement for more than 1000 batches
flag = False
for epoch in range(config.num_epochs):
print('Epoch:', epoch + 1)
batch_train = batch_iter(x_train, y_train, config.batch_size)
for x_batch, y_batch in batch_train:
feed_dict = feed_data(x_batch, y_batch, config.dropout_keep_prob)
if total_batch % config.save_per_batch == 0:
                # write the training results to TensorBoard scalars every save_per_batch batches
s = session.run(merged_summary, feed_dict=feed_dict)
writer.add_summary(s, total_batch)
if total_batch % config.print_per_batch == 0:
                # report performance on the training and validation sets every print_per_batch batches
feed_dict[model.keep_prob] = 1.0
loss_train, acc_train = session.run([model.loss, model.acc], feed_dict=feed_dict)
loss_val, acc_val = evaluate(session, x_val, y_val) # todo
if acc_val > best_acc_val:
                    # save the best result
best_acc_val = acc_val
last_improved = total_batch
saver.save(sess=session, save_path=save_path)
improved_str = '*'
else:
improved_str = ''
time_dif = get_time_dif(start_time)
msg = 'Iter: {0:>6}, Train Loss: {1:>6.2}, Train Acc: {2:>7.2%},' \
+ ' Val Loss: {3:>6.2}, Val Acc: {4:>7.2%}, Time: {5} {6}'
print(msg.format(total_batch, loss_train, acc_train, loss_val, acc_val, time_dif, improved_str))
feed_dict[model.keep_prob] = config.dropout_keep_prob
            session.run(model.optim, feed_dict=feed_dict)  # run the optimization step
total_batch += 1
if total_batch - last_improved > require_improvement:
                # validation accuracy has not improved for a long time; stop training early
print("No optimization for a long time, auto-stopping...")
flag = True
                break  # exit the inner loop
        if flag:  # same check as above, exit the epoch loop
break
def test():
print("Loading test data...")
start_time = time.time()
x_test, y_test = process_file(test_dir, word_to_id, cat_to_id, config.seq_length)
session = tf.Session()
session.run(tf.global_variables_initializer())
saver = tf.train.Saver()
    saver.restore(sess=session, save_path=save_path)  # load the saved model
print('Testing...')
loss_test, acc_test = evaluate(session, x_test, y_test)
msg = 'Test Loss: {0:>6.2}, Test Acc: {1:>7.2%}'
print(msg.format(loss_test, acc_test))
batch_size = 128
data_len = len(x_test)
num_batch = int((data_len - 1) / batch_size) + 1
y_test_cls = np.argmax(y_test, 1)
    y_pred_cls = np.zeros(shape=len(x_test), dtype=np.int32)  # holds the prediction results
    for i in range(num_batch):  # process batch by batch
start_id = i * batch_size
end_id = min((i + 1) * batch_size, data_len)
feed_dict = {
model.input_x: x_test[start_id:end_id],
model.keep_prob: 1.0
}
y_pred_cls[start_id:end_id] = session.run(model.y_pred_cls, feed_dict=feed_dict)
    # Evaluation
print("Precision, Recall and F1-Score...")
print(metrics.classification_report(y_test_cls, y_pred_cls, target_names=categories))
    # Confusion matrix
print("Confusion Matrix...")
cm = metrics.confusion_matrix(y_test_cls, y_pred_cls)
print(cm)
time_dif = get_time_dif(start_time)
print("Time usage:", time_dif)
if __name__ == '__main__':
if len(sys.argv) != 2 or sys.argv[1] not in ['train', 'test']:
raise ValueError("""usage: python run_cnn.py [train / test]""")
print('Configuring CNN model...')
config = TCNNConfig()
    if not os.path.exists(vocab_dir):  # rebuild the vocabulary if it does not exist
build_vocab(train_dir, vocab_dir, config.vocab_size)
categories, cat_to_id = read_category()
words, word_to_id = read_vocab(vocab_dir)
config.vocab_size = len(words)
model = TextCNN(config)
if sys.argv[1] == 'train':
train()
else:
test()
| 6,689 | 32.118812 | 112 |
py
|
text-classification-cnn-rnn
|
text-classification-cnn-rnn-master/predict.py
|
# coding: utf-8
from __future__ import print_function
import os
import tensorflow as tf
import tensorflow.contrib.keras as kr
from cnn_model import TCNNConfig, TextCNN
from data.cnews_loader import read_category, read_vocab
try:
bool(type(unicode))
except NameError:
unicode = str
base_dir = 'data/cnews'
vocab_dir = os.path.join(base_dir, 'cnews.vocab.txt')
save_dir = 'checkpoints/textcnn'
save_path = os.path.join(save_dir, 'best_validation')  # path of the best validation checkpoint
class CnnModel:
def __init__(self):
self.config = TCNNConfig()
self.categories, self.cat_to_id = read_category()
self.words, self.word_to_id = read_vocab(vocab_dir)
self.config.vocab_size = len(self.words)
self.model = TextCNN(self.config)
self.session = tf.Session()
self.session.run(tf.global_variables_initializer())
saver = tf.train.Saver()
        saver.restore(sess=self.session, save_path=save_path)  # load the saved model
def predict(self, message):
        # Models trained under either Python 2 or Python 3 can be loaded and run under either version
content = unicode(message)
data = [self.word_to_id[x] for x in content if x in self.word_to_id]
feed_dict = {
self.model.input_x: kr.preprocessing.sequence.pad_sequences([data], self.config.seq_length),
self.model.keep_prob: 1.0
}
y_pred_cls = self.session.run(self.model.y_pred_cls, feed_dict=feed_dict)
return self.categories[y_pred_cls[0]]
if __name__ == '__main__':
cnn_model = CnnModel()
test_demo = ['三星ST550以全新的拍摄方式超越了以往任何一款数码相机',
'热火vs骑士前瞻:皇帝回乡二番战 东部次席唾手可得新浪体育讯北京时间3月30日7:00']
for i in test_demo:
print(cnn_model.predict(i))
| 1,694 | 28.736842 | 104 |
py
|
text-classification-cnn-rnn
|
text-classification-cnn-rnn-master/run_rnn.py
|
# coding: utf-8
from __future__ import print_function
import os
import sys
import time
from datetime import timedelta
import numpy as np
import tensorflow as tf
from sklearn import metrics
from rnn_model import TRNNConfig, TextRNN
from data.cnews_loader import read_vocab, read_category, batch_iter, process_file, build_vocab
base_dir = 'data/cnews'
train_dir = os.path.join(base_dir, 'cnews.train.txt')
test_dir = os.path.join(base_dir, 'cnews.test.txt')
val_dir = os.path.join(base_dir, 'cnews.val.txt')
vocab_dir = os.path.join(base_dir, 'cnews.vocab.txt')
save_dir = 'checkpoints/textrnn'
save_path = os.path.join(save_dir, 'best_validation')  # path for saving the best validation checkpoint
def get_time_dif(start_time):
"""获取已使用时间"""
end_time = time.time()
time_dif = end_time - start_time
return timedelta(seconds=int(round(time_dif)))
def feed_data(x_batch, y_batch, keep_prob):
feed_dict = {
model.input_x: x_batch,
model.input_y: y_batch,
model.keep_prob: keep_prob
}
return feed_dict
def evaluate(sess, x_, y_):
"""评估在某一数据上的准确率和损失"""
data_len = len(x_)
batch_eval = batch_iter(x_, y_, 128)
total_loss = 0.0
total_acc = 0.0
for x_batch, y_batch in batch_eval:
batch_len = len(x_batch)
feed_dict = feed_data(x_batch, y_batch, 1.0)
loss, acc = sess.run([model.loss, model.acc], feed_dict=feed_dict)
total_loss += loss * batch_len
total_acc += acc * batch_len
return total_loss / data_len, total_acc / data_len
def train():
print("Configuring TensorBoard and Saver...")
    # Configure TensorBoard; when retraining, delete the tensorboard folder first, otherwise the new graphs are written on top of the old ones
tensorboard_dir = 'tensorboard/textrnn'
if not os.path.exists(tensorboard_dir):
os.makedirs(tensorboard_dir)
tf.summary.scalar("loss", model.loss)
tf.summary.scalar("accuracy", model.acc)
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(tensorboard_dir)
    # Configure Saver
saver = tf.train.Saver()
if not os.path.exists(save_dir):
os.makedirs(save_dir)
print("Loading training and validation data...")
    # Load the training and validation sets
start_time = time.time()
x_train, y_train = process_file(train_dir, word_to_id, cat_to_id, config.seq_length)
x_val, y_val = process_file(val_dir, word_to_id, cat_to_id, config.seq_length)
time_dif = get_time_dif(start_time)
print("Time usage:", time_dif)
    # Create session
session = tf.Session()
session.run(tf.global_variables_initializer())
writer.add_graph(session.graph)
print('Training and evaluating...')
start_time = time.time()
    total_batch = 0              # total number of batches processed
    best_acc_val = 0.0           # best validation accuracy so far
    last_improved = 0            # batch index of the last improvement
    require_improvement = 1000   # stop training early if there is no improvement for more than 1000 batches
flag = False
for epoch in range(config.num_epochs):
print('Epoch:', epoch + 1)
batch_train = batch_iter(x_train, y_train, config.batch_size)
for x_batch, y_batch in batch_train:
feed_dict = feed_data(x_batch, y_batch, config.dropout_keep_prob)
if total_batch % config.save_per_batch == 0:
                # write the training results to TensorBoard scalars every save_per_batch batches
s = session.run(merged_summary, feed_dict=feed_dict)
writer.add_summary(s, total_batch)
if total_batch % config.print_per_batch == 0:
                # report performance on the training and validation sets every print_per_batch batches
feed_dict[model.keep_prob] = 1.0
loss_train, acc_train = session.run([model.loss, model.acc], feed_dict=feed_dict)
loss_val, acc_val = evaluate(session, x_val, y_val) # todo
if acc_val > best_acc_val:
                    # save the best result
best_acc_val = acc_val
last_improved = total_batch
saver.save(sess=session, save_path=save_path)
improved_str = '*'
else:
improved_str = ''
time_dif = get_time_dif(start_time)
msg = 'Iter: {0:>6}, Train Loss: {1:>6.2}, Train Acc: {2:>7.2%},' \
+ ' Val Loss: {3:>6.2}, Val Acc: {4:>7.2%}, Time: {5} {6}'
print(msg.format(total_batch, loss_train, acc_train, loss_val, acc_val, time_dif, improved_str))
feed_dict[model.keep_prob] = config.dropout_keep_prob
            session.run(model.optim, feed_dict=feed_dict)  # run the optimization step
total_batch += 1
if total_batch - last_improved > require_improvement:
                # validation accuracy has not improved for a long time; stop training early
print("No optimization for a long time, auto-stopping...")
flag = True
                break  # exit the inner loop
        if flag:  # same check as above, exit the epoch loop
break
def test():
print("Loading test data...")
start_time = time.time()
x_test, y_test = process_file(test_dir, word_to_id, cat_to_id, config.seq_length)
session = tf.Session()
session.run(tf.global_variables_initializer())
saver = tf.train.Saver()
    saver.restore(sess=session, save_path=save_path)  # load the saved model
print('Testing...')
loss_test, acc_test = evaluate(session, x_test, y_test)
msg = 'Test Loss: {0:>6.2}, Test Acc: {1:>7.2%}'
print(msg.format(loss_test, acc_test))
batch_size = 128
data_len = len(x_test)
num_batch = int((data_len - 1) / batch_size) + 1
y_test_cls = np.argmax(y_test, 1)
    y_pred_cls = np.zeros(shape=len(x_test), dtype=np.int32)  # holds the prediction results
    for i in range(num_batch):  # process batch by batch
start_id = i * batch_size
end_id = min((i + 1) * batch_size, data_len)
feed_dict = {
model.input_x: x_test[start_id:end_id],
model.keep_prob: 1.0
}
y_pred_cls[start_id:end_id] = session.run(model.y_pred_cls, feed_dict=feed_dict)
    # Evaluation
print("Precision, Recall and F1-Score...")
print(metrics.classification_report(y_test_cls, y_pred_cls, target_names=categories))
    # Confusion matrix
print("Confusion Matrix...")
cm = metrics.confusion_matrix(y_test_cls, y_pred_cls)
print(cm)
time_dif = get_time_dif(start_time)
print("Time usage:", time_dif)
if __name__ == '__main__':
if len(sys.argv) != 2 or sys.argv[1] not in ['train', 'test']:
raise ValueError("""usage: python run_rnn.py [train / test]""")
print('Configuring RNN model...')
config = TRNNConfig()
    if not os.path.exists(vocab_dir):  # rebuild the vocabulary if it does not exist
build_vocab(train_dir, vocab_dir, config.vocab_size)
categories, cat_to_id = read_category()
words, word_to_id = read_vocab(vocab_dir)
config.vocab_size = len(words)
model = TextRNN(config)
if sys.argv[1] == 'train':
train()
else:
test()
| 6,675 | 32.21393 | 112 |
py
|
text-classification-cnn-rnn
|
text-classification-cnn-rnn-master/helper/cnews_group.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
将文本整合到 train、test、val 三个文件中
"""
import os
def _read_file(filename):
"""读取一个文件并转换为一行"""
with open(filename, 'r', encoding='utf-8') as f:
return f.read().replace('\n', '').replace('\t', '').replace('\u3000', '')
def save_file(dirname):
"""
将多个文件整合并存到3个文件中
dirname: 原数据目录
文件内容格式: 类别\t内容
"""
f_train = open('data/cnews/cnews.train.txt', 'w', encoding='utf-8')
f_test = open('data/cnews/cnews.test.txt', 'w', encoding='utf-8')
f_val = open('data/cnews/cnews.val.txt', 'w', encoding='utf-8')
    for category in os.listdir(dirname):  # one directory per category
cat_dir = os.path.join(dirname, category)
if not os.path.isdir(cat_dir):
continue
files = os.listdir(cat_dir)
count = 0
for cur_file in files:
filename = os.path.join(cat_dir, cur_file)
content = _read_file(filename)
if count < 5000:
f_train.write(category + '\t' + content + '\n')
elif count < 6000:
f_test.write(category + '\t' + content + '\n')
else:
f_val.write(category + '\t' + content + '\n')
count += 1
print('Finished:', category)
f_train.close()
f_test.close()
f_val.close()
if __name__ == '__main__':
save_file('data/thucnews')
print(len(open('data/cnews/cnews.train.txt', 'r', encoding='utf-8').readlines()))
print(len(open('data/cnews/cnews.test.txt', 'r', encoding='utf-8').readlines()))
print(len(open('data/cnews/cnews.val.txt', 'r', encoding='utf-8').readlines()))
| 1,629 | 29.754717 | 85 |
py
|