# ==== iharm3d-master/script/analysis/luminosity_study.py (repo: iharm3d) ====
################################################################################
# #
# LUMINOSITY COMPARISON #
# #
################################################################################
import os, sys
import pickle
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import hdf5_to_dict as io
import plot as bplt
from analysis_fns import *
from luminosity_th_study import overlay_rth_contours
USEARRSPACE=False
run_name = sys.argv[1]
if "SANE" in run_name:
SIZE = 50
AT_R = 40
else:
SIZE = 400
AT_R = 100
window=[0,SIZE/2,0,SIZE]
FIGX = 15
FIGY = 15
dumpfile = os.path.join("/scratch/03002/bprather/pharm_dumps/M87SimulationLibrary/GRMHD",run_name,"dumps/dump_00001500.h5")
hdr,geom,dump = io.load_all(dumpfile)
plotfile = os.path.join("/work/03002/bprather/stampede2/movies",run_name,"eht_out.p")
avg = pickle.load(open(plotfile, "rb"))
fig = plt.figure(figsize=(FIGX, FIGY))
gs = gridspec.GridSpec(2, 2, width_ratios=[1,2])
ax = plt.subplot(gs[0,0])
bplt.plot_xz(ax, geom, np.log10(d_fns['FE_EM'](dump)), arrayspace=USEARRSPACE, average=True, window=window)
ax.set_title(r"$\log_{10}( -{{T_{EM}}^r}_t )$")
bplt.overlay_contours(ax, geom, geom['r'], [AT_R], color='k')
overlay_rth_contours(ax, geom, avg, legend=True)
ax = plt.subplot(gs[1,0])
bplt.plot_xz(ax, geom, np.log10(d_fns['FE'](dump)), arrayspace=USEARRSPACE, average=True, window=window)
ax.set_title(r"$\log_{10}( -{T^r}_t - \rho u^r )$")
bplt.overlay_contours(ax, geom, geom['r'], [AT_R], color='k')
overlay_rth_contours(ax, geom, avg)
# I can rely on this for now
start = int(avg['avg_start'])//5
end = int(avg['avg_end'])//5
# Average over quiescence
mdav = np.mean(np.abs(avg['mdot'][start:end]))
ax = plt.subplot(gs[0,1])
ax.plot(avg['r'], avg['LBZ_bg1_r']/mdav, label=r"$L_{BZ}$ ($\beta\gamma > 1.0$ cut)", color='k')
ax.plot(avg['r'], avg['LBZ_sigma1_r']/mdav, label=r"$L_{BZ}$ ($\sigma$ > 1 cut)", color='xkcd:green')
ax.plot(avg['r'], avg['LBZ_allp_r']/mdav, label=r"$L_{BZ}$ (FE > 0 cut)", color='xkcd:pink')
ax.plot(avg['r'], avg['LBZ_Be_nob0_r']/mdav, label=r"$L_{BZ}$ ($Be > 0.02$ cut)", color='xkcd:red')
ax.plot(avg['r'], avg['LBZ_mu2_r']/mdav, label=r"$L_{BZ}$ ($\mu > 2$ cut)", color='xkcd:blue')
ax.set_title(r"$L_{BZ} / \dot{M} = \int -{{T_{EM}}^r}_t \sqrt{-g} dx^{\theta} dx^{\phi} / \dot{M}$")
ax.set_xlim([0,SIZE])
ax.set_xlabel("$r$ (M)")
ax.axvline(AT_R, color='k')
#maxes = [np.max(ab_av(avg['LBZ_'+tag+'_r'])[hdr['n1']//4:]) for tag in ['sigma1', 'be_nob1', 'be_nob0']]
#mins = [np.min(ab_av(avg['LBZ_'+tag+'_r'])[hdr['n1']//4:]) for tag in ['sigma1', 'be_nob1', 'be_nob0']]
#yhi = max(maxes); ylow = max(min(mins),1e-4*yhi)
#print(yhi, ylow)
#ax.set_ylim([ylow ,yhi])
if "SANE" in run_name:
ax.set_yscale('log')
ax.legend(loc='upper right')
ax = plt.subplot(gs[1,1])
ax.plot(avg['r'], avg['Lj_bg1_r']/mdav, label=r"$L_{j}$ ($\beta\gamma > 1.0$ cut)", color='k')
ax.plot(avg['r'], avg['Lj_sigma1_r']/mdav, label=r"$L_{j}$ ($\sigma$ > 1 cut)", color='xkcd:green')
ax.plot(avg['r'], avg['Lj_allp_r']/mdav, label=r"$L_{j}$ (FE > 0 cut)", color='xkcd:pink')
ax.plot(avg['r'], avg['Lj_Be_nob0_r']/mdav, label=r"$L_{j}$ ($Be > 0.02$ cut)", color='xkcd:red')
ax.plot(avg['r'], avg['Lj_mu2_r']/mdav, label=r"$L_{j}$ ($\mu > 2$ cut)", color='xkcd:blue')
ax.set_title(r"$L_{tot} / \dot{M} = \int (-{T^r}_t - \rho u^r) \sqrt{-g} dx^{\theta} dx^{\phi} / \dot{M}$")
ax.set_xlim([0,SIZE])
ax.set_xlabel("$r$ (M)")
ax.axvline(AT_R, color='k')
#maxes = [np.max(ab_av(avg['Ltot_'+tag+'_r'])[hdr['n1']//4:]) for tag in ['sigma1', 'be_nob1', 'be_nob0']]
#mins = [np.min(ab_av(avg['Ltot_'+tag+'_r'])[hdr['n1']//4:]) for tag in ['sigma1', 'be_nob1', 'be_nob0']]
#yhi = max(maxes); ylow = max(min(mins),1e-4*yhi)
#print(yhi, ylow)
#ax.set_ylim([ylow,yhi])
if "SANE" in run_name:
ax.set_yscale('log')
ax.legend(loc='lower right')
plt.tight_layout()
plt.savefig(run_name.replace("/","_")+"_L_study.png", dpi=100)
plt.close(fig)

# ==== iharm3d-master/script/analysis/util.py (repo: iharm3d) ====

################################################################################
# #
# UTILITY FUNCTIONS #
# #
################################################################################
import subprocess
import glob
import os
import signal
import multiprocessing
import psutil
import numpy as np
# TODO fns to process argv
# Run a function in parallel with Python's multiprocessing
# 'function' must take only a number
def run_parallel(function, nmax, nthreads, debug=False):
# TODO if debug...
pool = multiprocessing.Pool(nthreads)
try:
pool.map_async(function, list(range(nmax))).get(720000)
except KeyboardInterrupt:
print('Caught interrupt!')
pool.terminate()
exit(1)
else:
pool.close()
pool.join()
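
# Minimal usage sketch (the worker name 'process_dump' is hypothetical): the
# function must be picklable, i.e. defined at module level, and take one integer.
#
#   import util
#   def process_dump(n):
#       print("Processing dump {}".format(n))
#   util.run_parallel(process_dump, nmax=100, nthreads=8)
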
# Run a function in parallel with Python's multiprocessing
# 'function' must take only a number
# 'merge_function' must take the same number plus whatever 'function' outputs, and adds to the dictionary out_dict
def iter_parallel(function, merge_function, out_dict, nmax, nthreads, debug=False):
# TODO if debug...
pool = multiprocessing.Pool(nthreads)
try:
# Map the above function to the dump numbers, returning an iterator of 'out' dicts to be merged one at a time
# This avoids keeping the (very large) full pre-average list in memory
out_iter = pool.imap(function, list(range(nmax)))
for n,result in enumerate(out_iter):
merge_function(n, result, out_dict)
except KeyboardInterrupt:
pool.terminate()
pool.join()
else:
pool.close()
pool.join()
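
# Sketch of the merge pattern iter_parallel expects (all names hypothetical):
# 'function' returns a small dict per dump, and 'merge_function' folds each
# result into out_dict, so the full per-dump list never sits in memory at once.
#
#   def analyze_dump(n):
#       return {'mdot': compute_mdot(n)}               # one value per dump
#   def merge(n, result, out_dict):
#       out_dict.setdefault('mdot', np.zeros(nmax))[n] = result['mdot']
#   out = {}
#   iter_parallel(analyze_dump, merge, out, nmax, nthreads=8)
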
# Calculate ideal # threads
# Lower pad values are safer
def calc_nthreads(hdr, n_mkl=8, pad=0.25):
# Limit threads for 192^3+ problem due to memory
# Try to add some parallelism w/MKL. Don't freak if it doesn't work
try:
import ctypes
mkl_rt = ctypes.CDLL('libmkl_rt.so')
mkl_set_num_threads = mkl_rt.MKL_Set_Num_Threads
mkl_get_max_threads = mkl_rt.MKL_Get_Max_Threads
mkl_set_num_threads(n_mkl)
print("Using {} MKL threads".format(mkl_get_max_threads()))
except Exception as e:
print(e)
# Roughly compute memory and leave some generous padding for multiple copies and Python games
# (N1*N2*N3*8)*(NPRIM + 4*4 + 6) = size of "dump," (N1*N2*N3*8)*(2*4*4 + 6) = size of "geom"
# TODO get a better model for this, and save memory in general
ncopies = hdr['n_prim'] + 4*4 + 6
nproc = int(pad * psutil.virtual_memory().total/(hdr['n1']*hdr['n2']*hdr['n3']*8*ncopies))
if nproc < 1: nproc = 1
if nproc > psutil.cpu_count(logical=False): nproc = psutil.cpu_count(logical=False)
print("Using {} Python processes".format(nproc))
return nproc
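
# Worked example of the memory model above (illustrative numbers): a 192^3 grid
# with n_prim = 8 gives ncopies = 8 + 16 + 6 = 30, so one process holds roughly
# 192**3 * 8 * 30 bytes ~ 1.7 GB. With 128 GB of RAM and pad = 0.25, that allows
# int(0.25 * 128e9 / 1.7e9) = 18 processes, further capped at the physical core count.
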
# COLORIZED OUTPUT
class color:
BOLD = '\033[1m'
WARNING = '\033[1;31m'
BLUE = '\033[94m'
NORMAL = '\033[0m'
def get_files(PATH, NAME):
return np.sort(glob.glob(os.path.join(PATH,'') + NAME))
# PRINT ERROR MESSAGE
def warn(mesg):
    print(color.WARNING + "\n  ERROR: " + color.NORMAL + mesg + "\n")
# APPEND '/' TO PATH IF MISSING
def sanitize_path(path):
return os.path.join(path, '')
# SEND OUTPUT TO LOG FILE AS WELL AS TERMINAL
def log_output(sys, logfile_name):
import re
f = open(logfile_name, 'w')
class split(object):
def __init__(self, *files):
self.files = files
def write(self, obj):
n = 0
ansi_escape = re.compile(r'\x1b[^m]*m')
for f in self.files:
if n > 0:
f.write(ansi_escape.sub('', obj))
else:
f.write(obj)
f.flush()
n += 1
def flush(self):
for f in self.files:
f.flush()
sys.stdout = split(sys.stdout, f)
sys.stderr = split(sys.stderr, f)
# CREATE DIRECTORY
def make_dir(path):
if not os.path.exists(path):
os.makedirs(path)
# CALL rm -rf ON RELATIVE PATHS ONLY
def safe_remove(path):
import sys
from subprocess import call
# ONLY ALLOW RELATIVE PATHS
if path[0] == '/':
warn("DIRECTORY " + path + " IS NOT A RELATIVE PATH! DANGER OF DATA LOSS")
sys.exit()
elif os.path.exists(path):
call(['rm', '-rf', path])

# ==== iharm3d-master/script/analysis/quickplot_thphi.py (repo: iharm3d) ====

################################################################################
# #
# PLOT ONE PRIMITIVE #
# #
################################################################################
import hdf5_to_dict as io
import plot as bplt
from analysis_fns import *
import matplotlib
import matplotlib.pyplot as plt
import sys
import numpy as np
USEARRSPACE=False
UNITS=False
FIGX = 12
FIGY = 12
# Decide where to measure fluxes
def i_of(rcoord):
i = 0
while geom['r'][i,hdr['n2']//2,0] < rcoord:
i += 1
i -= 1
return i
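
# Equivalent vectorized lookup, as a sketch (assumes geom['r'] increases with i):
#   i = np.searchsorted(geom['r'][:, hdr['n2']//2, 0], rcoord) - 1
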
def overlay_thphi_contours(ax, geom, r):
s = "_" + str(r) + "_thphi"
r_i = i_of(r)
max_th = geom['n2']//2
x = bplt.loop_phi(geom['x'][r_i,:max_th,:])
y = bplt.loop_phi(geom['y'][r_i,:max_th,:])
prep = lambda var : bplt.loop_phi(var[r_i,:max_th,:])
#ax.contour(x,y, prep(dump['ucon']), [0.0], colors='k')
ax.contour(x,y, prep(dump['sigma']), [1.0], colors='xkcd:blue')
#ax.contour(x,y, prep(dump['sigma']), [10.0], colors='C3')
#ax.contour(x,y, prep(dump['Be_b']), [0.02], colors='C4')
#ax.contour(x,y, prep(dump['Be_b']), [1.0], colors='C5')
ax.contour(x,y, prep(dump['Be_nob']), [0.02], colors='xkcd:purple')
ax.contour(x,y, prep(dump['Be_nob']), [1.0], colors='xkcd:green')
#ax.contour(x,y, prep(geom['r']*dump['ucon'][:,:,:,1]), [1.0], color='C8')
#ax.contour(x,y, prep(dump['gamma']), [1.5], color='C9')
if len(sys.argv) > 2:
dumpfile = sys.argv[1]
gridfile = sys.argv[2]
elif len(sys.argv) > 1:
dumpfile = sys.argv[1]
gridfile = None
else:
print("Specify dump file!")
exit(-1)
if gridfile is not None:
hdr = io.load_hdr(dumpfile)
geom = io.load_geom(hdr, gridfile)
dump = io.load_dump(dumpfile, hdr, geom)
else:
# Assumes gridfile in same directory
hdr,geom,dump = io.load_all(dumpfile)
# BZ luminosity; see eht_analysis
if hdr['r_out'] < 100:
iBZ = i_of(40) # most SANEs
rstring="40"
else:
iBZ = i_of(100) # most MADs
rstring="100"
# Add bernoulli param to dump to plot/cut
dump['Be_b'] = bernoulli(dump, with_B=True)
dump['Be_nob'] = bernoulli(dump, with_B=False)
dump['sigma'] = dump['bsq']/dump['RHO']
fig, ax = plt.subplots(2,2,figsize=(FIGX, FIGY))
# Overlay contours at the same radius as the plotted slice
bplt.plot_thphi(ax[0,0], geom, T_mixed(dump, 1, 0)[iBZ,:,:], iBZ, label="FE 2D Slice r="+rstring)
overlay_thphi_contours(ax[0,0], geom, int(rstring))
bplt.plot_thphi(ax[0,1], geom, dump['RHO'][iBZ,:,:]*dump['ucon'][iBZ,:,:,1], iBZ, label="FM 2D Slice r="+rstring)
overlay_thphi_contours(ax[0,1], geom, int(rstring))
bplt.plot_thphi(ax[1,0], geom, T_mixed(dump, 1, 3)[iBZ,:,:], iBZ, label="FL 2D Slice r="+rstring)
overlay_thphi_contours(ax[1,0], geom, int(rstring))
bplt.plot_thphi(ax[1,1], geom, dump['RHO'][iBZ,:,:], iBZ, label="RHO 2D Slice r="+rstring)
overlay_thphi_contours(ax[1,1], geom, int(rstring))
plt.savefig("_".join(dumpfile.split("/")[-5:-2]) + '_L1_' + rstring + '_thphi.png')
plt.close(fig)

# ==== iharm3d-master/script/analysis/eht_unify.py (repo: iharm3d) ====

#!/usr/bin/env python3
import os, sys
import pickle
import numpy as np
import hdf5_to_dict as io
avgs = []
for fname in sys.argv[1:-1]:
print("Loading {}".format(fname))
avgs.append(pickle.load(open(fname, "rb")))
avgs[-1]['fname'] = fname
#for avg in avgs:
# print("Name: {}, contents: {}".format(avg['fname'], avg.keys()))
num_keys = [len(avg.keys()) for avg in avgs]
avg_max_keys = num_keys.index(max(num_keys))
# TODO organize this damn dict. HDF5?
direct_list = ['fname', 'a', 'gam', 'gam_e', 'gam_p', 'r', 'th', 'th_eh', 'th_bz', 'phi', 'avg_start', 'avg_end', 'avg_w', 't']
keys_to_sum = [key for key in avgs[avg_max_keys].keys() if key not in direct_list]
uni = {}
for key in keys_to_sum:
uni[key] = np.zeros_like(avgs[avg_max_keys][key])
for avg in avgs:
if key in avg:
# Keep track of averages w/weights, otherwise just sum since everything's time-dependent
if (key[-2:] == '_r' or key[-3:] == '_th' or key[-4:] == '_hth' or key[-4:] == '_phi' or
key[-4:] == '_rth' or key[-6:] == '_thphi' or key[-5:] == '_rphi' or key[-4:] == '_pdf'):
uni[key] += avg[key]*avg['avg_w']
elif key[-1:] == 't':
if uni[key].shape[0] < avg[key].shape[0]:
uni[key] += avg[key][:uni[key].shape[0]]
else:
uni[key][:avg[key].shape[0]] += avg[key]
else:
if uni[key].size < avg[key].size:
uni[key] += avg[key][:uni[key].size]
else:
uni[key][:avg[key].size] += avg[key]
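# E.g. with two averaging windows of avg_w = 0.4 and 0.6, a radial profile key
# combines as uni[key] = 0.4*A[key] + 0.6*B[key]: a time average weighted by
# each segment's share of the total window.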
for key in direct_list:
if key in avgs[avg_max_keys].keys():
uni[key] = avgs[avg_max_keys][key]
# Add compat/completeness stuff
uni['mdot'] = uni['Mdot']
uni['phi_b'] = uni['Phi_b']/np.sqrt(uni['Mdot'])
# Attach the run's diagnostic log, if a log file was passed as the last argument
if os.path.exists(sys.argv[-1]):
uni['diags'] = io.load_log(sys.argv[-1])
with open("eht_out.p", "wb") as outf:
print("Writing eht_out.p")
pickle.dump(uni, outf)

# ==== iharm3d-master/script/analysis/coordinates.py (repo: iharm3d) ====

import numpy as np
from defs import Met
def coord_to_KS(X, mtype):
pass
def vec_to_KS(vec, X, mtype, met_params):
    """Translate a contravariant vector from native coordinates to Kerr-Schild."""
    return np.einsum("ij...,j...->i...", dxdX_KS_to(X, mtype, met_params), vec)
def dxdX_to_KS(X, mtype, met_params, koral_rad=False):
"""Get transformation matrix to Kerr-Schild from several different coordinate systems.
X should be given in Kerr-Schild coordinates."""
# Play some index games to get the inverse from numpy
ks_t = np.einsum("ij...->...ij", dxdX_KS_to(X, mtype, met_params, koral_rad))
return np.einsum("...ij->ij...", np.linalg.inv(ks_t))
def dxdX_KS_to(X, mtype, met_params, koral_rad=False):
"""Get transformation to Kerr-Schild coordinates from another coordinate system.
X should be given in native coordinates"""
dxdX = np.zeros((4, 4, *X.shape[1:]))
dxdX[0,0] = 1 # We don't yet use anything _that_ exotic
    if mtype == Met.MINKOWSKI:
        # Minkowski is flat space: there is no KS-family transformation to apply
        raise ValueError("Cannot translate spacetimes!")
elif mtype == Met.MKS:
hslope = met_params['hslope']
dxdX[1, 1] = np.exp(X[1])
dxdX[2, 2] = np.pi - (hslope - 1.) * np.pi * np.cos(2. * np.pi * X[2])
dxdX[3, 3] = 1
elif mtype == Met.FMKS:
dxdX[1, 1] = np.exp(X[1])
hslope = met_params['hslope']
mks_smooth, poly_norm, poly_xt, poly_alpha = met_params['mks_smooth'], met_params['poly_norm'], met_params['poly_xt'], met_params['poly_alpha']
startx1 = met_params['startx1']
dxdX[2, 1] = -np.exp(mks_smooth * (startx1 - X[1])) * mks_smooth * (np.pi / 2. -
np.pi * X[2] + poly_norm * (
2. * X[2] - 1.) * (1 + (
np.power((-1. + 2 * X[2]) / poly_xt, poly_alpha)) / (1 + poly_alpha)) -
1. / 2. * (1. - hslope) * np.sin(
2. * np.pi * X[2]))
dxdX[2, 2] = np.pi + (1. - hslope) * np.pi * np.cos(2. * np.pi * X[2]) + np.exp(
mks_smooth * (startx1 - X[1])) * (-np.pi +
2. * poly_norm * (1. + np.power((2. * X[2] - 1.) / poly_xt,
poly_alpha) / (poly_alpha + 1.)) +
(2. * poly_alpha * poly_norm * (2. * X[2] - 1.) * np.power(
(2. * X[2] - 1.) / poly_xt, poly_alpha - 1.)) / (
(1. + poly_alpha) * poly_xt) -
(1. - hslope) * np.pi * np.cos(2. * np.pi * X[2]))
dxdX[3, 3] = 1.
elif mtype == Met.MKS3:
# TODO take these as params, bring this in line with above w.r.t function name
if koral_rad:
R0=-1.35; H0=0.7; MY1=0.002; MY2=0.02; MP0=1.3
else:
# MAD
#R0=0; H0=0.6; MY1=0.0025; MY2=0.025; MP0=1.2
#SANE
R0=-2; H0=0.6; MY1=0.0025; MY2=0.025; MP0=1.2
dxdX[1,1] = 1./(X[1] - R0)
dxdX[2, 1] = -((np.power(2, 1 + MP0) * np.power(X[1], -1 + MP0) * MP0 * (MY1 - MY2) * np.arctan(((-2 * X[2] + np.pi) * np.tan((H0 * np.pi) / 2.)) / np.pi)) /
(H0 * np.power(np.power(X[1], MP0) * (1 - 2 * MY1) + np.power(2, 1 + MP0) * (MY1 - MY2), 2) * np.pi))
dxdX[2, 2] = ( (-2 * np.power(X[1], MP0) * np.tan((H0 * np.pi) / 2.)) /
(H0 * (np.power(X[1], MP0) * (-1 + 2 * MY1) +
np.power(2, 1 + MP0) * (-MY1 + MY2)) * np.pi**2 * (1 + (np.power(-2 * X[2] + np.pi, 2) * np.power(np.tan((H0 * np.pi) / 2.), 2)) /
np.pi**2)))
dxdX[3,3] = 1.
elif mtype == Met.EKS:
dxdX[1,1] = 1. / X[1]
dxdX[2,2] = 1. / np.pi
dxdX[3,3] = 1.
else:
raise ValueError("Unsupported metric type {}!".format(mtype))
return dxdX
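
# Usage sketch for the transformations above (the hslope value is a placeholder):
#   X = ...                                    # native MKS coords, shape (4, N1, N2)
#   ucon_ks = vec_to_KS(ucon, X, Met.MKS, {'hslope': 0.3})
# i.e. contract the vector with dxdX_KS_to's (to-KS, from-native) index pair.
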

# ==== iharm3d-master/script/analysis/plot.py (repo: iharm3d) ====

################################################################################
# #
# UTILITIES FOR PLOTTING #
# #
################################################################################
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
from scipy.integrate import trapz
from analysis_fns import *
# Get xz slice of 3D data
def flatten_xz(array, patch_pole=False, average=False):
if array.ndim == 2:
N1 = array.shape[0]
N2 = array.shape[1]
flat = np.zeros([2*N1,N2])
for i in range(N1):
flat[i,:] = array[N1 - 1 - i,:]
flat[i+N1,:] = array[i,:]
return flat
N1 = array.shape[0]; N2 = array.shape[1]; N3 = array.shape[2]
flat = np.zeros([2*N1,N2])
if average:
for i in range(N1):
# Produce identical hemispheres to get the right size output
flat[i,:] = np.mean(array[N1 - 1 - i,:,:], axis=-1)
flat[i+N1,:] = np.mean(array[i,:,:], axis=-1)
else:
for i in range(N1):
flat[i,:] = array[N1 - 1 - i,:,N3//2]
flat[i+N1,:] = array[i,:,0]
# Theta is technically [small,pi/2-small]
# This patches the X coord so the plot looks nice
if patch_pole:
flat[:,0] = 0
flat[:,-1] = 0
return flat
# Get xy slice of 3D data
def flatten_xy(array, average=False, loop=True):
if array.ndim == 2:
return array
if average:
slice = np.mean(array, axis=1)
else:
slice = array[:,array.shape[1]//2,:]
loop = False
if loop:
return loop_phi(slice)
else:
return slice
def loop_phi(array):
return np.vstack((array.transpose(),array.transpose()[0])).transpose()
# Plotting fns: pass dump file and var as either string (key) or ndarray
# Note integrate option overrides average
# Also note label convention:
# * "known labels" are assigned true or false,
# * "unknown labels" are assigned None or a string
# TODO pass through kwargs instead of all this duplication
def plot_xz(ax, geom, var, cmap='jet', vmin=None, vmax=None, window=[-40,40,-40,40],
cbar=True, cbar_ticks=None, label=None, xlabel=True, ylabel=True, xticks=True, yticks=True,
arrayspace=False, average=False, integrate=False, bh=True, half_cut=False, shading='gouraud'):
    if integrate:
        var = var * geom['n3']  # scale a copy rather than the caller's array
        average = True
if (arrayspace):
x1_norm = (geom['X1'] - geom['startx1']) / (geom['n1'] * geom['dx1'])
x2_norm = (geom['X2'] - geom['startx2']) / (geom['n2'] * geom['dx2'])
x = flatten_xz(x1_norm)[geom['n1']:,:]
z = flatten_xz(x2_norm)[geom['n1']:,:]
if geom['n3'] > 1:
var = flatten_xz(var, average=average)[geom['n1']:,:]
else:
var = var[:,:,0]
else:
        if half_cut:
            x = flatten_xz(geom['x'], patch_pole=True)[geom['n1']:,:]
            z = flatten_xz(geom['z'])[geom['n1']:,:]
            var = flatten_xz(var, average=average)[geom['n1']:,:]
            window = [0] + list(window[1:])  # copy: assigning in place would mutate the shared default list
else:
x = flatten_xz(geom['x'], patch_pole=True)
z = flatten_xz(geom['z'])
var = flatten_xz(var, average=average)
#print 'xshape is ', x.shape, ', zshape is ', z.shape, ', varshape is ', var.shape
mesh = ax.pcolormesh(x, z, var, cmap=cmap, vmin=vmin, vmax=vmax,
shading=shading)
if arrayspace:
if xlabel: ax.set_xlabel("X1 (arbitrary)")
if ylabel: ax.set_ylabel("X2 (arbitrary)")
ax.set_xlim([0, 1]); ax.set_ylim([0, 1])
else:
if xlabel: ax.set_xlabel(r"$x \frac{c^2}{G M}$")
if ylabel: ax.set_ylabel(r"$z \frac{c^2}{G M}$")
if window:
ax.set_xlim(window[:2]); ax.set_ylim(window[2:])
if bh:
# BH silhouette
circle1=plt.Circle((0,0), geom['r_eh'], color='k');
ax.add_artist(circle1)
if not half_cut:
ax.set_aspect('equal')
if cbar:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(mesh, cax=cax, ticks=cbar_ticks)
if not xticks:
plt.gca().set_xticks([])
plt.xticks([])
ax.set_xticks([])
if not yticks:
plt.gca().set_yticks([])
plt.yticks([])
ax.set_yticks([])
if not xticks and not yticks:
# Remove the whole frame for good measure
#fig.patch.set_visible(False)
ax.axis('off')
if label is not None:
ax.set_title(label)
def plot_xy(ax, geom, var, cmap='jet', vmin=None, vmax=None, window=[-40,40,-40,40],
cbar=True, label=None, xlabel=True, ylabel=True, xticks=True, yticks=True,
ticks=None, arrayspace=False, average=False, integrate=False, bh=True, shading='gouraud'):
    if integrate:
        var = var * geom['n2']  # scale a copy rather than the caller's array
        average = True
if arrayspace:
# Flatten_xy adds a rank. TODO is this the way to handle it?
x1_norm = (geom['X1'] - geom['startx1']) / (geom['n1'] * geom['dx1'])
x3_norm = (geom['X3'] - geom['startx3']) / (geom['n3'] * geom['dx3'])
x = flatten_xy(x1_norm, loop=False)
y = flatten_xy(x3_norm, loop=False)
var = flatten_xy(var, average=average, loop=False)
else:
x = flatten_xy(geom['x'])
y = flatten_xy(geom['y'])
var = flatten_xy(var, average=average)
#print 'xshape is ', x.shape, ', yshape is ', y.shape, ', varshape is ', var.shape
mesh = ax.pcolormesh(x, y, var, cmap=cmap, vmin=vmin, vmax=vmax,
shading=shading)
if arrayspace:
if xlabel: ax.set_xlabel("X1 (arbitrary)")
if ylabel: ax.set_ylabel("X3 (arbitrary)")
ax.set_xlim([0, 1]); ax.set_ylim([0, 1])
else:
if xlabel: ax.set_xlabel(r"$x \frac{c^2}{G M}$")
if ylabel: ax.set_ylabel(r"$y \frac{c^2}{G M}$")
if window:
ax.set_xlim(window[:2]); ax.set_ylim(window[2:])
if bh:
# BH silhouette
circle1=plt.Circle((0,0), geom['r_eh'], color='k');
ax.add_artist(circle1)
ax.set_aspect('equal')
if cbar:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(mesh, cax=cax, ticks=ticks)
if not xticks:
plt.gca().set_xticks([])
plt.xticks([])
ax.set_xticks([])
if not yticks:
plt.gca().set_yticks([])
plt.yticks([])
ax.set_yticks([])
if not xticks and not yticks:
# Remove the whole frame for good measure
#fig.patch.set_visible(False)
ax.axis('off')
if label:
ax.set_title(label)
# TODO this is currently just for profiles already in 2D
def plot_thphi(ax, geom, var, r_i, cmap='jet', vmin=None, vmax=None, window=None,
cbar=True, label=None, xlabel=True, ylabel=True, arrayspace=False,
ticks=None, project=False, shading='gouraud'):
if arrayspace:
# X3-X2 makes way more sense than X2-X3 since the disk is horizontal
x = (geom['X3'][r_i] - geom['startx3']) / (geom['n3'] * geom['dx3'])
y = (geom['X2'][r_i] - geom['startx2']) / (geom['n2'] * geom['dx2'])
else:
radius = geom['r'][r_i,0,0]
max_th = geom['n2']//2
if project:
x = loop_phi((geom['th']*np.cos(geom['phi']))[r_i,:max_th,:])
y = loop_phi((geom['th']*np.sin(geom['phi']))[r_i,:max_th,:])
else:
x = loop_phi(geom['x'][r_i,:max_th,:])
y = loop_phi(geom['y'][r_i,:max_th,:])
var = loop_phi(var[:max_th,:])
if window is None:
if arrayspace:
ax.set_xlim([0, 1]); ax.set_ylim([0, 1])
elif project:
window = [-1.6, 1.6, -1.6, 1.6]
else:
window = [-radius, radius, -radius, radius]
else:
ax.set_xlim(window[:2]); ax.set_ylim(window[2:])
#print 'xshape is ', x.shape, ', yshape is ', y.shape, ', varshape is ', var.shape
mesh = ax.pcolormesh(x, y, var, cmap=cmap, vmin=vmin, vmax=vmax,
shading=shading)
if arrayspace:
if xlabel: ax.set_xlabel("X3 (arbitrary)")
if ylabel: ax.set_ylabel("X2 (arbitrary)")
else:
if xlabel: ax.set_xlabel(r"$x \frac{c^2}{G M}$")
if ylabel: ax.set_ylabel(r"$y \frac{c^2}{G M}$")
ax.set_aspect('equal')
if cbar:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(mesh, cax=cax, ticks=ticks)
if label:
ax.set_title(label)
def overlay_contours(ax, geom, var, levels, color='k'):
x = flatten_xz(geom['x'])
z = flatten_xz(geom['z'])
var = flatten_xz(var, average=True)
return ax.contour(x, z, var, levels=levels, colors=color)
def overlay_field(ax, geom, dump, **kwargs):
overlay_flowlines(ax, geom, dump['B1'], dump['B2'], **kwargs)
def overlay_flowlines(ax, geom, varx1, varx2, nlines=50, arrayspace=False, reverse=False):
N1 = geom['n1']; N2 = geom['n2']
if arrayspace:
x1_norm = (geom['X1'] - geom['startx1']) / (geom['n1'] * geom['dx1'])
x2_norm = (geom['X2'] - geom['startx2']) / (geom['n2'] * geom['dx2'])
x = flatten_xz(x1_norm)[geom['n1']:,:]
z = flatten_xz(x2_norm)[geom['n1']:,:]
else:
x = flatten_xz(geom['x'])
z = flatten_xz(geom['z'])
varx1 = varx1.mean(axis=-1)
varx2 = varx2.mean(axis=-1)
AJ_phi = np.zeros([2*N1, N2])
gdet = geom['gdet']
for j in range(N2):
for i in range(N1):
if not reverse:
AJ_phi[N1-1-i,j] = AJ_phi[i+N1,j] = (
trapz(gdet[:i,j]*varx2[:i,j], dx=geom['dx1']) -
trapz(gdet[i,:j]*varx1[i,:j], dx=geom['dx2']))
else:
AJ_phi[N1-1-i,j] = AJ_phi[i+N1,j] = (
trapz(gdet[:i,j]*varx2[:i,j], dx=geom['dx1']) +
trapz(gdet[i,j:]*varx1[i,j:], dx=geom['dx2']))
AJ_phi -= AJ_phi.min()
levels = np.linspace(0,AJ_phi.max(),nlines*2)
if arrayspace:
ax.contour(x, z, AJ_phi[N1:,:], levels=levels, colors='k')
else:
ax.contour(x, z, AJ_phi, levels=levels, colors='k')
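
# The contours above are level sets of the phi-averaged vector potential,
# reconstructed (up to sign convention) as
#   A_phi(i,j) = int sqrt(-g) B^2 dx1 - int sqrt(-g) B^1 dx2,
# so the contours trace poloidal field lines.
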
def overlay_quiver(ax, geom, dump, JE1, JE2, cadence=64, norm=1):
    JE1 = JE1 * geom['gdet']  # work on copies rather than scaling the caller's arrays
    JE2 = JE2 * geom['gdet']
max_J = np.max(np.sqrt(JE1**2 + JE2**2))
x1_norm = (geom['X1'] - geom['startx1']) / (geom['n1'] * geom['dx1'])
x2_norm = (geom['X2'] - geom['startx2']) / (geom['n2'] * geom['dx2'])
x = flatten_xz(x1_norm)[geom['n1']:,:]
z = flatten_xz(x2_norm)[geom['n1']:,:]
s1 = geom['n1']//cadence; s2 = geom['n2']//cadence
ax.quiver(x[::s1,::s2], z[::s1,::s2], JE1[::s1,::s2], JE2[::s1,::s2],
units='xy', angles='xy', scale_units='xy', scale=cadence*max_J/norm)
# Plot two slices together without duplicating everything in the caller
def plot_slices(ax1, ax2, geom, dump, var, field_overlay=True, nlines=10, **kwargs):
    arrspace = kwargs.get('arrspace', False)
plot_xz(ax1, geom, var, **kwargs)
if field_overlay:
overlay_field(ax1, geom, dump, nlines=nlines, arrayspace=arrspace)
plot_xy(ax2, geom, var, **kwargs)
# TODO Consistent idea of plane/average in x2,x3
def radial_plot(ax, geom, var, n2=0, n3=0, average=False,
logr=False, logy=False, rlim=None, ylim=None, arrayspace=False,
ylabel=None, style='k-'):
r = geom['r'][:, geom['n2']//2, 0]
if var.ndim == 1:
data = var
elif var.ndim == 2:
data = var[:,n2]
elif var.ndim == 3:
if average:
data = np.mean(var[:,n2,:], axis=-1)
else:
data = var[:,n2,n3]
if arrayspace:
ax.plot(list(range(geom['n1'])), data, style)
else:
ax.plot(r,data, style)
if logr: ax.set_xscale('log')
if logy: ax.set_yscale('log')
if rlim: ax.set_xlim(rlim)
if ylim: ax.set_ylim(ylim)
ax.set_xlabel(r"$r \frac{c^2}{G M}$")
if ylabel is not None: ax.set_ylabel(ylabel)
def diag_plot(ax, diag, varname, t=0, ylabel=None, ylim=None, logy=False, xlabel=True, style='k-'):
var = diag[varname]
ax.plot(diag['t'], var, style)
ax.set_xlim([diag['t'][0], diag['t'][-1]])
# Trace current t on finished plot
if t != 0:
ax.axvline(t, color='r')
if ylim is not None: ax.set_ylim(ylim)
if logy: ax.set_yscale('log')
if xlabel:
ax.set_xlabel(r"$t \frac{c^3}{G M}$")
if ylabel is not None:
ax.set_ylabel(ylabel)
else:
ax.set_ylabel(varname)
def hist_2d(ax, var_x, var_y, xlbl, ylbl, title=None, logcolor=False, bins=40, cbar=True, cmap='jet', ticks=None):
# Courtesy of George Wong
var_x_flat = var_x.flatten()
var_y_flat = var_y.flatten()
nidx = np.isfinite(var_x_flat) & np.isfinite(var_y_flat)
hist = np.histogram2d(var_x_flat[nidx], var_y_flat[nidx], bins=bins)
X,Y = np.meshgrid(hist[1], hist[2])
if logcolor:
hist[0][hist[0] == 0] = np.min(hist[0][np.nonzero(hist[0])])
mesh = ax.pcolormesh(X, Y, np.log10(hist[0]), cmap=cmap)
else:
mesh = ax.pcolormesh(X, Y, hist[0], cmap=cmap)
# Add the patented Ben Ryan colorbar
if cbar:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(mesh, cax=cax, ticks=ticks)
if title is not None: ax.set_title(title)
ax.set_xlabel(xlbl)
ax.set_ylabel(ylbl)

# ==== iharm3d-master/script/analysis/initial_cuts.py (repo: iharm3d) ====

## Initial conditions cuts
from __future__ import print_function, division
import hdf5_to_dict as io
import sys
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pylab import *
COMPARE = False
dump_dir = sys.argv[1]
init_file = io.get_dumps_list(dump_dir)[0]
hdr, geom, dump = io.load_all(init_file, extras=False)
N2 = hdr['n2']
r = geom['r'][:, N2//2, 0]
rho = dump['RHO'][:, N2//2, 0]
uu = dump['UU'][:, N2//2, 0]
p = (hdr['gam']-1)*uu
b2 = dump['bsq'][:, N2//2, 0]
beta = dump['beta'][:, N2//2, 0]
gamma = dump['gamma'][:, N2//2, 0]
figname = 'initial-cuts.pdf'
if COMPARE:
tablename = 'initial-cuts.csv'
data=loadtxt('torus_cuts.csv')
#data1=loadtxt('data_d2_x+0.16D+01_n0000.csv',skiprows=1,delimiter=',')
r_=0
rho_=1
p_=2
lfac_=4
b2_=3
def betainv(data):
return data[:,b2_]/2./data[:,p_]
f, all_axes = plt.subplots(2, 3, sharex='col')
((ax1, ax2, ax3), (ax4, ax5, ax6)) = all_axes
f.subplots_adjust(wspace=.5)
f.set_size_inches(10,4)
if COMPARE:
ax1.plot(data[:,r_],data[:,rho_],'r-')
ax2.plot(data[:,r_],data[:,p_],'r-')
ax3.plot(data[:,r_],sqrt(data[:,b2_]),'r-')
ax4.plot(data[:,r_],betainv(data),'r-')
ax5.plot(data[:,r_],data[:,lfac_],'r-')
ax6.plot(data[:,r_],data[:,p_]+data[:,b2_]/2.,'r-')
ax1.plot(r,rho,'b')
ax1.set_ylabel(r'$\rho$')
ax1.set_ylim(1e-8,1)
ax2.plot(r,p,'b')
ax2.set_ylabel(r'$P_{\rm gas}$')
ax2.set_ylim(1e-12,0.2)
ax3.plot(r,sqrt(b2),'b')
ax3.set_ylabel(r'$\sqrt{b_\mu b^\mu}$')
ax3.set_ylim(1.e-4,1.e-2)
ax4.plot(r,1/beta,'b')
ax4.set_ylabel(r'$\beta^{-1}$')
ax4.set_xlabel(r'$r_{\rm KS} [GM/c^2]$')
ax4.set_ylim(1.e-7,1.e-1)
ax5.plot(r,gamma,'b')
ax5.set_ylabel(r'$\Gamma$')
ax5.set_xlabel(r'$r_{\rm KS} [GM/c^2]$')
ax5.set_ylim(0.98,1.25)
ax6.plot(r,(p + b2/2.),'b')
ax6.set_ylabel(r'$P_{\rm gas}+P_{\rm mag}$')
ax6.set_xlabel(r'$r_{\rm KS} [GM/c^2]$')
ax6.set_ylim(1e-12,0.01)
for ax in all_axes.flatten():
ax.grid(True)
ax.set_yscale('log')
ax.set_xlim(2,50)
f.savefig(figname,bbox_inches='tight')
close()
#ascii.write(data[:,[r_,rho_,p_,lfac_,b2_]],tablename,delimiter=',',names=['r','rho','p','lfac','balphabalpha'])

# ==== iharm3d-master/script/analysis/hdf5_to_dict.py (repo: iharm3d) ====

################################################################################
# #
# READ HARM OUTPUT #
# #
################################################################################
from __future__ import print_function, division
import os, sys
from pkg_resources import parse_version
import numpy as np
import h5py
import glob
import units
import util
from analysis_fns import *
# New infra
from defs import Loci, Met
from coordinates import dxdX_to_KS, dxdX_KS_to
class HARMdump(object):
def __init__(self, dfname):
        self.dfile = h5py.File(dfname, 'r')
def __getitem__(self, name):
return d_fns[name](self.dfile)
def __del__(self):
self.dfile.close()
def get_dumps_list(path):
# Funny how many names output has
files_harm = [file for file in glob.glob(os.path.join(path,"*dump*.h5"))]
files_koral = [file for file in glob.glob(os.path.join(path,"*sim*.h5"))]
files_bhac = [file for file in glob.glob(os.path.join(path,"*data*.h5"))]
return np.sort(files_harm + files_koral + files_bhac)
def get_full_dumps_list(path):
alldumps = get_dumps_list(path)
fulldumps = []
for fname in alldumps:
dfile = h5py.File(fname, 'r')
if dfile['is_full_dump'][()] == 1:
fulldumps.append(fname)
dfile.close()
return np.sort(fulldumps)
# For single plotting scripts
def load_all(fname, **kwargs):
hdr = load_hdr(fname)
path = os.path.dirname(fname)
geom = load_geom(hdr, path)
dump = load_dump(fname, hdr, geom, **kwargs)
return hdr, geom, dump
# For cutting on time without loading everything
def get_dump_time(fname):
dfile = h5py.File(fname, 'r')
if 't' in dfile.keys():
t = dfile['t'][()]
else:
t = 0
dfile.close()
return t
# Function to recursively un-bytes all the dumb HDF5 strings
def decode_all(d):
    for key in d:
        # Decode bytes
        if type(d[key]) == np.bytes_:
            d[key] = d[key].decode('UTF-8')
        # Split ndarray of bytes into list of strings
        elif type(d[key]) == np.ndarray:
            if d[key].dtype.kind == 'S':
                d[key] = [el.decode('UTF-8') for el in d[key]]
        # Recurse for any subfolders (the parameter must not shadow the builtin
        # 'dict', or this membership test silently fails)
        elif type(d[key]) in [list, dict]:
            decode_all(d[key])
def load_hdr(fname):
dfile = h5py.File(fname, 'r')
hdr = {}
try:
# Scoop all the keys that are not folders
for key in [key for key in list(dfile['header'].keys()) if not (key == 'geom' or key == 'problem')]:
hdr[key] = dfile['header/' + key][()]
for key in [key for key in list(dfile['header/problem'].keys())]:
hdr[key] = dfile['header/problem/'+key][()]
# TODO load these from grid.h5? Or is the header actually the place for them?
for key in [key for key in list(dfile['header/geom'].keys()) if not key in ['mks', 'mmks', 'mks3'] ]:
hdr[key] = dfile['header/geom/' + key][()]
# TODO there must be a shorter/more compat way to do the following
if 'mks' in list(dfile['header/geom'].keys()):
for key in dfile['header/geom/mks']:
hdr[key] = dfile['header/geom/mks/' + key][()]
if 'mmks' in list(dfile['header/geom'].keys()):
for key in dfile['header/geom/mmks']:
hdr[key] = dfile['header/geom/mmks/' + key][()]
if 'mks3' in list(dfile['header/geom'].keys()):
for key in dfile['header/geom/mks3']:
hdr[key] = dfile['header/geom/mks3/' + key][()]
except KeyError as e:
util.warn("File is older than supported by this library. Use hdf5_to_dict_old.py")
exit(-1)
decode_all(hdr)
# Turn the version string into components
if 'version' not in hdr.keys():
hdr['version'] = "iharm-alpha-3.6"
print("Unknown version: defaulting to {}".format(hdr['version']))
hdr['codename'], hdr['codestatus'], hdr['vnum'] = hdr['version'].split("-")
hdr['vnum'] = [int(x) for x in hdr['vnum'].split(".")]
# HARM-specific workarounds:
if hdr['codename'] == "iharm":
# Work around naming bug before output v3.4
if hdr['vnum'] < [3,4]:
names = []
for name in hdr['prim_names'][0]:
names.append( name )
hdr['prim_names'] = names
# Work around bad radius names before output v3.6
if ('r_in' not in hdr) and ('Rin' in hdr):
hdr['r_in'], hdr['r_out'] = hdr['Rin'], hdr['Rout']
# Grab the git revision if that's something we output
if 'extras' in dfile.keys() and 'git_version' in dfile['extras'].keys():
hdr['git_version'] = dfile['/extras/git_version'][()].decode('UTF-8')
dfile.close()
# Patch things that sometimes people forget to put in the header
if 'n_dim' not in hdr:
hdr['n_dim'] = 4
if 'prim_names' not in hdr:
if hdr['n_prim'] == 10:
hdr['prim_names'] = ["RHO", "UU", "U1", "U2", "U3", "B1", "B2", "B3", "KEL", "KTOT"]
else:
hdr['prim_names'] = ["RHO", "UU", "U1", "U2", "U3", "B1", "B2", "B3"]
if 'has_electrons' not in hdr:
if hdr['n_prim'] == 10:
hdr['has_electrons'] = True
else:
hdr['has_electrons'] = False
# TODO this is KS-specific
if 'r_eh' not in hdr and hdr['metric'] != "MINKOWSKI":
hdr['r_eh'] = (1. + np.sqrt(1. - hdr['a']**2))
if 'poly_norm' not in hdr and hdr['metric'] == "MMKS":
hdr['poly_norm'] = 0.5 * np.pi * 1. / (1. + 1. / (hdr['poly_alpha'] + 1.) *
1. / np.power(hdr['poly_xt'], hdr['poly_alpha']))
if 'git_version' in hdr:
print("Loaded header from code {}, git rev {}".format(hdr['version'], hdr['git_version']))
else:
print("Loaded header from code {}".format(hdr['version']))
return hdr
def load_geom(hdr, path):
# Allow override by making path a filename
if ".h5" in path:
fname = path
else:
# Otherwise use encoded or default info
if 'gridfile' in hdr:
fname = os.path.join(path, hdr['gridfile'])
else:
fname = os.path.join(path, "grid.h5")
gfile = h5py.File(fname, 'r')
geom = {}
for key in list(gfile['/'].keys()):
geom[key] = gfile[key][()]
# Useful stuff for direct access in geom. TODO r_isco if available
for key in ['n1', 'n2', 'n3', 'dx1', 'dx2', 'dx3', 'startx1', 'startx2', 'startx3', 'n_dim', 'metric']:
geom[key] = hdr[key]
if hdr['metric'] in ["MKS", "MMKS", "FMKS"]:
for key in ['r_eh', 'r_in', 'r_out', 'a', 'hslope']:
geom[key] = hdr[key]
if hdr['metric'] == "MMKS": # TODO standardize names !!!
for key in ['poly_norm', 'poly_alpha', 'poly_xt', 'mks_smooth']:
geom[key] = hdr[key]
elif hdr['metric'] in ["MKS3"]:
for key in ['r_eh']:
geom[key] = hdr[key]
geom['r_out'] = geom['r'][-1,hdr['n2']//2,0]
# these get used interchangeably and I don't care
geom['x'] = geom['X']
geom['y'] = geom['Y']
geom['z'] = geom['Z']
if 'phi' not in geom and hdr['metric'] in ["MKS", "MMKS", "FMKS", "MKS3"]:
geom['phi'] = geom['X3']
# Sometimes the vectors and zones use different coordinate systems
# TODO allow specifying both systems
if 'gdet_zone' in geom:
# Preserve
geom['gcon_vec'] = geom['gcon']
geom['gcov_vec'] = geom['gcov']
geom['gdet_vec'] = geom['gdet']
geom['lapse_vec'] = geom['lapse']
# But default to the grid metric. Lots of integrals and later manipulation with this
geom['gcon'] = geom.pop('gcon_zone',None)
geom['gcov'] = geom.pop('gcov_zone',None)
geom['gdet'] = geom.pop('gdet_zone',None)
geom['lapse'] = geom.pop('lapse_zone',None)
geom['mixed_metrics'] = True
else:
geom['mixed_metrics'] = False
# Compress geom in phi for normal use
for key in ['gdet', 'lapse', 'gdet_vec', 'lapse_vec']:
if key in geom:
geom[key] = geom[key][:,:,0]
for key in ['gcon', 'gcov', 'gcon_vec', 'gcov_vec']:
if key in geom:
geom[key] = geom[key][:,:,0,:,:]
if geom['mixed_metrics']:
# Get all Kerr-Schild coordinates for generating transformation matrices
Xgeom = np.zeros((4,geom['n1'],geom['n2']))
Xgeom[1] = geom['r'][:,:,0]
Xgeom[2] = geom['th'][:,:,0]
# TODO add all metric params to the geom dict
eks2ks = dxdX_to_KS(Xgeom, Met.EKS, hdr, koral_rad=hdr['has_electrons'])
ks2mks3 = dxdX_KS_to(Xgeom, Met[geom['metric']], hdr, koral_rad=hdr['has_electrons'])
print("Will convert vectors in EKS to zone metric {}".format(geom['metric']))
geom['vec_to_grid'] = np.einsum("ij...,jk...->...ik", eks2ks, ks2mks3)
return geom
def load_dump(fname, hdr, geom, derived_vars=True, extras=True):
dfile = h5py.File(fname, 'r')
dump = {}
# Carry pointers to header. Saves some pain getting shapes/parameters for plots
# Geometry, however, _must be carried separately_ due to size in memory
dump['hdr'] = hdr
# TODO this necessarily grabs the /whole/ primitives array
for key in [key for key in list(dfile['/'].keys()) if key not in ['header', 'extras', 'prims'] ]:
dump[key] = dfile[key][()]
# TODO should probably error at this one
if 't' not in dump:
dump['t'] = 0.
for name, num in zip(hdr['prim_names'], list(range(hdr['n_prim']))):
dump[name] = dfile['prims'][:,:,:,num]
if extras and 'extras' in dfile.keys():
# Load the extras.
for key in list(dfile['extras'].keys()):
dump[key] = dfile['extras/' + key][()]
dfile.close()
# Recalculate all the derived variables, if we need to
if derived_vars:
dump['ucon'], dump['ucov'], dump['bcon'], dump['bcov'] = get_state(hdr, geom, dump)
dump['bsq'] = (dump['bcon']*dump['bcov']).sum(axis=-1)
dump['beta'] = 2.*(hdr['gam']-1.)*dump['UU']/(dump['bsq'])
if hdr['has_electrons']:
ref = units.get_cgs()
dump['Thetae'] = ref['MP']/ref['ME']*dump['KEL']*dump['RHO']**(hdr['gam_e']-1.)
dump['ue'] = dump['KEL']*dump['RHO']**(hdr['gam_e']) / (hdr['gam_e']-1.)
dump['up'] = dump['UU'] - dump['ue']
dump['TpTe'] = (hdr['gam_p']-1.)*dump['up']/((hdr['gam_e']-1.)*dump['ue'])
return dump
def load_log(path):
# TODO specify log name in dumps, like grid
logfname = os.path.join(path,"log.out")
if not os.path.exists(logfname):
return None
dfile = np.loadtxt(logfname).transpose()
# TODO log should probably have a header
diag = {}
diag['t'] = dfile[0]
diag['rmed'] = dfile[1]
diag['pp'] = dfile[2]
diag['e'] = dfile[3]
diag['uu_rho_gam_cent'] = dfile[4]
diag['uu_cent'] = dfile[5]
diag['mdot'] = dfile[6]
diag['edot'] = dfile[7]
diag['ldot'] = dfile[8]
diag['mass'] = dfile[9]
diag['egas'] = dfile[10]
diag['Phi'] = dfile[11]
diag['phi'] = dfile[12]
diag['jet_EM_flux'] = dfile[13]
diag['divbmax'] = dfile[14]
diag['lum_eht'] = dfile[15]
diag['mdot_eh'] = dfile[16]
diag['edot_eh'] = dfile[17]
diag['ldot_eh'] = dfile[18]
return diag
# For adding contents of the log to dumps
def log_time(diag, var, t):
if len(diag['t'].shape) < 1:
return diag[var]
else:
i = 0
while i < len(diag['t']) and diag['t'][i] < t:
i += 1
return diag[var][i-1]
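
# Equivalent vectorized form, as a sketch (assumes diag['t'] is sorted ascending,
# and guards the t < t[0] edge case):
#   i = np.searchsorted(diag['t'], t)
#   return diag[var][max(i-1, 0)]
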

# ==== iharm3d-master/script/analysis/eht_image_analysis.py (repo: iharm3d) ====

################################################################################
# #
# CALCULATE TIME-AVERAGED QUANTITIES FROM IPOLE IMAGES #
# #
################################################################################
from __future__ import print_function, division
from analysis_fns import *
import os, glob
import util
import sys
import pickle
import psutil,multiprocessing
import numpy as np
impath = sys.argv[1]
debug = 0
# M87 parameters
Msun = 1.989e33
M = 6.2e9*Msun
G = 6.67428e-8
c = 2.99792e10
pc = 3.08568e18
d = 16.9*1.e6*pc
# angular scale: microarcseconds subtended per gravitational radius (GM/c^2)
muas_per_M = G*M/(c*c*d) * 1.e6 * 206264.8
M_per_muas = 1./muas_per_M
# pixel size in radians (the images are assumed to be 1 muas per pixel)
da = 1. / (1.e6 * 206265.)
# solid angle subtended by pixel
dO = da*da
Jy = 1.e-23 # cgs
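# Sanity check on these scales: GM/c^2 ~ 9.2e14 cm and d ~ 5.2e25 cm, so one M
# subtends ~1.8e-11 rad ~ 3.6 microarcseconds, the familiar scale for M87.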
# Shamelessly stolen from CFG's 'ipole_plot.py'
# TODO new ipole format too
files = np.sort(glob.glob(os.path.join(impath,"*.dat")))
foldername = os.path.basename(impath)
# Image names store a bunch of info we want to keep around
# FORM: image_a+0.94_1000_163_0_230.e9_6.2e9_7.791e+24_10.dat
# Or something like it...
# Hackish heuristic detection follows, not for the squeamish
def parse_params(fname):
fname_split = os.path.basename(fname)[:-4].split("_")
ints = []
floats = []
for bit in fname_split:
try:
if len(bit) != 4:
ints.append(int(bit))
except ValueError as e:
pass
try:
floats.append(float(bit))
except ValueError as e:
pass
params = {}
params['spin'] = [bit for bit in fname_split if ("a+" in bit or "a-" in bit or bit == "a0")]
params['angle'] = [bit for bit in ints if bit in [158,163,168,12,17,22]]
params['freq'] = [bit for bit in floats if bit > 100.e9 and bit < 1000.e9]
params['mass'] = [bit for bit in floats if bit > 1.e9 and bit < 10.e9]
params['munit'] = [bit for bit in floats if bit > 1.e20 and bit < 1.e50]
params['rhigh'] = [bit for bit in ints if bit in [1,10,20,40,80,160]]
if len(params['rhigh']) == 2 and (1 in params['rhigh']):
params['rhigh'].remove(1)
for key in params:
if len(params[key]) > 1:
print("Failed to parse fileaname!")
print("Parameter {} has values {} for file {}!".format(key, params[key], os.path.basename(files[0])))
exit(-1)
elif len(params[key]) == 0:
if key == "rhigh":
params['rhigh'] = None
else:
print("Param {} not present in filename {}".format(key, os.path.basename(files[0])))
else:
params[key] = params[key][0]
return(params)
params_global = parse_params(files[0])
# Make sure we get the low-angle runs of negative spins
n = 0
if "a-" in params_global['spin']:
while params_global['angle'] not in [12,17,22]:
n += 1
params_global = parse_params(files[n])
global_param_n = n
#print("Run parameters: fname={}, spin={}, angle={}, freq={}, mass={}, munit={}, rhigh={}".format(
# fname, spin, angle, freq, mass, munit, rhigh))
def process(n):
# Skip file if it wasn't the same run
if parse_params(files[n]) != params_global:
print("File {} is from different run than {}. Skipping.".format(files[n],files[global_param_n]))
return None
# read in data
i0, j0, Ia, Is, Qs, Us, Vs = np.loadtxt(files[n], unpack=True)
print("Read {} / {}".format(n,len(files)))
out = {}
# Keep full images to average them into another
out['i0'] = i0
out['j0'] = j0
out['Ia'] = Ia
out['Is'] = Is
out['Qs'] = Qs
out['Us'] = Us
out['Vs'] = Vs
# set image size: assumed square!
out['ImRes'] = ImRes = int(round(np.sqrt(len(i0))))
out['FOV'] = ImRes*M_per_muas
out['flux_pol'] = dO*sum(Is)/Jy
out['flux_unpol'] = dO*sum(Ia)/Jy
out['I_sum'] = Ib = sum(Is)
out['Q_sum'] = Qb = sum(Qs)
out['U_sum'] = Ub = sum(Us)
out['V_sum'] = Vb = sum(Vs)
out['LP_frac'] = np.sqrt(Qb*Qb + Ub*Ub)/Ib
out['CHI'] = (180./3.14159)*0.5*np.arctan2(Ub,Qb)
out['CP_frac'] = Vb/Ib
#TODO EVPA?
return out
if __name__ == "__main__":
if debug:
# SERIAL (very slow)
out_list = [process(n) for n in range(len(files))]
else:
# PARALLEL
#NTHREADS = util.calc_nthreads(hdr, pad=0.3)
NTHREADS = psutil.cpu_count(logical=False)
pool = multiprocessing.Pool(NTHREADS)
try:
# Map the above function to the dump numbers, returning a list of 'out' dicts
out_list = pool.map_async(process, list(range(len(files)))).get(99999999)
#print out_list[0].keys()
except KeyboardInterrupt:
pool.terminate()
pool.join()
else:
pool.close()
pool.join()
out_list = [x for x in out_list if x is not None]
ND = len(out_list)
out_full = {}
for key in out_list[0].keys():
if key in ['i0', 'j0', 'Ia', 'Is', 'Qs', 'Us', 'Vs']:
# Average the image parameter keys
out_full[key] = np.zeros_like(out_list[0][key])
for n in range(ND):
out_full[key] += out_list[n][key]
out_full[key] /= ND
else:
# Record all the individual number keys
out_full[key] = np.zeros(ND)
for n in range(ND):
out_full[key][n] = out_list[n][key]
for key in out_full:
if key not in ['i0', 'j0', 'Ia', 'Is', 'Qs', 'Us', 'Vs']:
print("Average {} is {}".format(key, np.mean(out_full[key])))
# Output average image
cols_array = np.c_[out_full['i0'], out_full['j0'], out_full['Ia'], out_full['Is'], out_full['Qs'], out_full['Us'], out_full['Vs']]
datfile = open("avg_img_{}.dat".format(foldername), "w")
for i in range(out_full['i0'].size):
datfile.write("{:.0f} {:.0f} {:g} {:g} {:g} {:g} {:g}\n".format(*cols_array[i]))
datfile.close()
# Add params too
out_full.update(params_global)
# Tag output with model to avoid writing more bash code
pickle.dump(out_full, open("im_avgs_{}.p".format(foldername), "wb"))

# ==== iharm3d-master/script/analysis/plot_init.py (repo: iharm3d) ====

################################################################################
# #
# GENERATE PLOT OF INITIAL CONDITIONS #
# #
################################################################################
from __future__ import print_function, division
import plot as bplt
import util
import hdf5_to_dict as io
import os,sys
import numpy as np
import matplotlib.pyplot as plt
NLINES = 20
SIZE = 600
PLOT_EXTRA = True
if PLOT_EXTRA:
FIGX = 10
FIGY = 13
NPLOTSX = 2
NPLOTSY = 3
else:
FIGX = 10
FIGY = 8
NPLOTSX = 2
NPLOTSY = 2
imname = "initial_conditions.png"
if sys.argv[1] == '-d':
debug = True
path = sys.argv[2]
else:
debug = False
path = sys.argv[1]
files = io.get_dumps_list(path)
if len(files) == 0:
util.warn("INVALID PATH TO DUMP FOLDER")
sys.exit(1)
hdr, geom, dump = io.load_all(files[0])
# Plot the first dump, specifically init as in Narayan '12
N1 = hdr['n1']; N2 = hdr['n2']; N3 = hdr['n3']
# Zoom in for smaller SANE tori
if SIZE > geom['r'][-1,0,0]:
SIZE = geom['r'][-1,0,0]
fig = plt.figure(figsize=(FIGX, FIGY))
# Density profile
ax = plt.subplot(NPLOTSY,NPLOTSX,1)
bplt.radial_plot(ax, geom, dump['RHO'], ylabel=r"$\rho$", n2=N2//2, n3=N3//2,
rlim=[8, 2*10**3], ylim=[10**(-4), 2], logr=True, logy=True)
# B-flux thru midplane inside radius
#flux = np.sum(dump['B2'][:,N2//2,:]*geom['gdet'][:,N2//2,None]*hdr['dx1']*hdr['dx3'],axis=-1)
flux_in = np.zeros((N1,))
flux_in[0] = np.sum(dump['B2'][0,N2//2,:]*geom['gdet'][0,N2//2,None]*hdr['dx1']*hdr['dx3'])
for n in range(1,N1):
flux_in[n] = flux_in[n-1] + np.sum(dump['B2'][n,N2//2,:]*geom['gdet'][n,N2//2,None]*hdr['dx1']*hdr['dx3'])
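# Equivalent cumulative form (sketch):
#   flux_in = np.cumsum(np.sum(dump['B2'][:,N2//2,:] * geom['gdet'][:,N2//2,None]
#                              * hdr['dx1'] * hdr['dx3'], axis=-1))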
ax = plt.subplot(NPLOTSY,NPLOTSX,2)
bplt.radial_plot(ax, geom, flux_in, ylabel=r"Flux in r", rlim=[0, SIZE])
# Density 2D
ax = plt.subplot(NPLOTSY,NPLOTSX,3)
bplt.plot_xz(ax, geom, np.log10(dump['RHO']),
vmin=-4, vmax = 0, label=r"$\log_{10}(\rho)$", window=[0,SIZE,-SIZE/2,SIZE/2])
# Beta 2D
ax = plt.subplot(NPLOTSY,NPLOTSX,4)
bplt.plot_xz(ax, geom, np.log10(dump['beta']),
label=r"$\beta$", cmap='RdBu_r', vmin=1, vmax=4,
window=[0,SIZE,-SIZE/2,SIZE/2])
bplt.overlay_field(ax, geom, dump, nlines=NLINES)
if PLOT_EXTRA:
ax = plt.subplot(NPLOTSY,NPLOTSX,5)
bplt.plot_xz(ax, geom, np.log10(dump['UU']),
vmin=-4, vmax = 0, label=r"$\log_{10}(U)$",
window=[0,SIZE,-SIZE/2,SIZE/2])
ax = plt.subplot(NPLOTSY,NPLOTSX,6)
bplt.plot_xz(ax, geom, np.log10(dump['bsq']),
label=r"$\log_{10}(b^2)$", cmap='RdBu_r', vmin=-8, vmax=2,
window=[0,SIZE,-SIZE/2,SIZE/2])
bplt.overlay_field(ax, geom, dump, nlines=NLINES)
plt.tight_layout()
plt.savefig(imname, dpi=100)
plt.close(fig)

# ==== iharm3d-master/script/analysis/simple/basic_analysis.py (repo: iharm3d) ====

######################################################################
#
# Simple analysis and plotting script to process problem output
# Plots mhdmodes, bondi and torus problem (2D and 3D)
#
######################################################################
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import gridspec
import os,psutil,sys
import h5py
from mpl_toolkits.axes_grid1 import make_axes_locatable
import multiprocessing as mp
# Parallelize analysis by spawning several processes using multiprocessing's Pool object
def run_parallel(function,dlist,nthreads):
pool = mp.Pool(nthreads)
pool.map_async(function,dlist).get(720000)
pool.close()
pool.join()
# Initialize global variables
globalvars_keys = ['PROB','NDIMS','DUMPSDIR','PLOTSDIR']
globalvars = {}
grid = {}
# Function to generate poloidal (x,z) slice
# Argument must be variable, patch pole (to have x coordinate plotted correctly), averaging in phi option
def xz_slice(var, patch_pole=False, average=False):
xz_var = np.zeros((2*grid['n1'],grid['n2']))
if average:
var = np.mean(var,axis=2)
for i in range(grid['n1']):
xz_var[i,:] = var[grid['n1']-1-i,:]
xz_var[i+grid['n1'],:] = var[i,:]
else:
angle = 0.; ind = 0
for i in range(grid['n1']):
xz_var[i,:] = var[grid['n1']-1-i,:,ind+grid['n3']//2]
xz_var[i+grid['n1'],:] = var[i,:,ind]
if patch_pole:
xz_var[:,0] = xz_var[:,-1] = 0
return xz_var
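
# E.g. xz_slice(rho, patch_pole=True, average=True) returns a (2*n1, n2) array
# in which the radial index is mirrored through the pole, so a single
# pcolormesh of it shows the full poloidal plane.
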
# Function to generate poloidal (y,z) slice
# Argument must be variable, patch pole (to have y coordinate plotted correctly), averaging in phi option
# Not really called but can include a function call
def yz_slice(var, patch_pole=False, average=False):
yz_var = np.zeros((2*grid['n1'],grid['n2']))
if average:
var = np.mean(var,axis=2)
for i in range(grid['n1']):
yz_var[i,:] = var[grid['n1']-1-i,:]
yz_var[i+grid['n1'],:] = var[i,:]
else:
angle = np.pi/2; ind = np.argmin(abs(grid['phi'][0,0,:]-angle))
for i in range(grid['n1']):
yz_var[i,:] = var[grid['n1']-1-i,:,ind+grid['n3']//2]
yz_var[i+grid['n1'],:] = var[i,:,ind]
if patch_pole:
yz_var[:,0] = yz_var[:,-1] = 0
return yz_var
# Function to generate toroidal (x,y) slice
# Argument must be variable, averaging in theta option
def xy_slice(var, average=False, patch_phi=False):
if average:
xy_var = np.mean(var,axis=1)
else:
xy_var = var[:,grid['n2']//2,:]
#xy_var = np.vstack((xy_var.transpose(),xy_var.transpose()[0])).transpose()
if patch_phi:
xy_var[:,0] = xy_var[:,-1] = 0
return xy_var
# Function to overlay field lines
# Argument must be axes object, B1, B2 and 'nlines' -> a parameter to account for density of field lines
def plotting_bfield_lines(ax,B1,B2,nlines=20):
xp = xz_slice(grid['x'], patch_pole=True)
zp = xz_slice(grid['z'])
B1_phi_avg = B1.mean(axis=-1)
B2_phi_avg = B2.mean(axis=-1)
AJ_phi = np.zeros([2*grid['n1'],grid['n2']])
for j in range(grid['n2']):
for i in range(grid['n1']):
AJ_phi[grid['n1']-1-i,j] = AJ_phi[i+grid['n1'],j] = (np.trapz(grid['gdet'][:i,j,0]*B2_phi_avg[:i,j],dx=grid['dx1']) - np.trapz(grid['gdet'][i,:j,0]*B1_phi_avg[i,:j],dx=grid['dx2']))
AJ_phi -=AJ_phi.min()
levels = np.linspace(0,AJ_phi.max(),nlines*2)
ax.contour(xp, zp, AJ_phi, levels=levels, colors='k')
# The actual function that computes and plots diagnostics for PROB=mhdmodes
def analysis_mhdmodes(dumpval, cmap='jet', vmin=-4e-5, vmax=4e-5, domain = [0,1,0,1], shading='gouraud'):
plt.clf()
print("Analyzing {0:04d} dump".format(dumpval))
dfile = h5py.File(os.path.join(globalvars['DUMPSDIR'],'dump_0000{0:04d}.h5'.format(dumpval)),'r')
rho = dfile['prims'][()][Ellipsis,0]
t = dfile['t'][()]
dfile.close()
t = "{:.3f}".format(t)
logrho=np.log10(rho)
fig = plt.figure(figsize=(16,9))
heights = [1,5]
gs = gridspec.GridSpec(nrows=2, ncols=2, height_ratios=heights, figure=fig)
ax0 = fig.add_subplot(gs[0,:])
ax0.annotate('t= '+str(t),xy=(0.5,0.5),xycoords='axes fraction',va='center',ha='center',fontsize='x-large')
ax0.axis("off")
ax1 = fig.add_subplot(gs[1,0])
rhoxzplot = ax1.pcolormesh(grid['x'][:,0,:], grid['z'][:,0,:], logrho[:,0,:], cmap=cmap, vmin=vmin, vmax=vmax, shading=shading)
ax1.set_xlabel('$x$')
ax1.set_ylabel('$z$')
ax1.set_xlim(domain[:2])
ax1.set_ylim(domain[2:])
ax1.set_title('Log($\\rho$)',fontsize='large')
ax1.set_aspect('equal')
divider = make_axes_locatable(ax1)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(rhoxzplot, cax=cax)
ax2 = fig.add_subplot(gs[1,1])
rhoxyplot = ax2.pcolormesh(grid['x'][:,:,0], grid['y'][:,:,0], logrho[:,:,0], cmap=cmap, vmin=vmin, vmax=vmax, shading=shading)
ax2.set_xlabel('$x$')
ax2.set_ylabel('$y$')
ax2.set_xlim(domain[:2])
ax2.set_ylim(domain[2:])
ax2.set_title('Log($\\rho$)',fontsize='large')
ax2.set_aspect('equal')
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(rhoxyplot, cax=cax)
plt.tight_layout()
plt.savefig(os.path.join(globalvars['PLOTSDIR'],'{}_basic_plot_{:04d}.png'.format(globalvars['PROB'],dumpval)))
plt.close()
# The actual function that computes and plots diagnostics for PROB=bondi
def analysis_bondi(dumpval, cmap='jet', vmin=-3, vmax=-1, domain = [-20,0,-20,20], bh=True, shading='gouraud'):
plt.clf()
print("Analyzing {0:04d} dump".format(dumpval))
dfile = h5py.File(os.path.join(globalvars['DUMPSDIR'],'dump_0000{0:04d}.h5'.format(dumpval)),'r')
rho = dfile['prims'][()][Ellipsis,0]
t = dfile['t'][()]
dfile.close()
t = "{:.3f}".format(t)
logrho=np.log10(rho)
xp = xz_slice(grid['x'], patch_pole=True)
zp = xz_slice(grid['z'])
rhop = xz_slice(logrho)
fig = plt.figure(figsize=(16,9))
heights = [1,5]
gs = gridspec.GridSpec(nrows=2, ncols=1, height_ratios=heights, figure=fig)
ax0 = fig.add_subplot(gs[0,0])
ax0.annotate('t= '+str(t),xy=(0.5,0.5),xycoords='axes fraction',va='center',ha='center',fontsize='x-large')
ax0.axis("off")
ax1 = fig.add_subplot(gs[1,0])
rhopolplot = ax1.pcolormesh(xp, zp, rhop, cmap=cmap, vmin=vmin, vmax=vmax, shading=shading)
ax1.set_xlabel('$x (GM/c^2)$')
ax1.set_ylabel('$z (GM/c^2)$')
ax1.set_xlim(domain[:2])
ax1.set_ylim(domain[2:])
ax1.set_title('Log($\\rho$)',fontsize='large')
if bh:
circle = plt.Circle((0,0),grid['rEH'],color='k')
ax1.add_artist(circle)
ax1.set_aspect('equal')
divider = make_axes_locatable(ax1)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(rhopolplot, cax=cax)
plt.tight_layout()
plt.savefig(os.path.join(globalvars['PLOTSDIR'],'{}_basic_plot_{:04d}.png'.format(globalvars['PROB'],dumpval)))
plt.close()
# The actual function that computes and plots diagnostics for PROB=torus and NDIMS=2
def analysis_torus2d(dumpval, cmap='jet', vmin=-5, vmax=0, domain = [-50,0,-50,50], bh=True, shading='gouraud'):
plt.clf()
print("Analyzing {0:04d} dump".format(dumpval))
dfile = h5py.File(os.path.join(globalvars['DUMPSDIR'],'dump_0000{0:04d}.h5'.format(dumpval)),'r')
rho = dfile['prims'][()][Ellipsis,0]
uu = np.array(dfile['prims'][()][Ellipsis,1])
u = np.array(dfile['prims'][()][Ellipsis,2:5])
B = np.array(dfile['prims'][()][Ellipsis,5:8])
gam = np.array(dfile['header/gam'][()])
t = dfile['t'][()]
dfile.close()
t = "{:.3f}".format(t)
logrho=np.log10(rho)
pg = (gam-1)*uu
gti = grid['gcon'][Ellipsis,0,1:4]
gij = grid['gcov'][Ellipsis,1:4,1:4]
beta_i = np.einsum('ijks,ijk->ijks',gti,grid['lapse']**2)
qsq = np.einsum('ijky,ijky->ijk',np.einsum('ijkxy,ijkx->ijky',gij,u),u)
gamma = np.sqrt(1+qsq)
ui = u-np.einsum('ijks,ijk->ijks',beta_i,gamma/grid['lapse'])
ut = gamma/grid['lapse']
ucon = np.append(ut[Ellipsis,None],ui,axis=3)
ucov = np.einsum('ijkmn,ijkn->ijkm',grid['gcov'],ucon)
bt = np.einsum('ijkm,ijkm->ijk',np.einsum('ijksm,ijks->ijkm',grid['gcov'][Ellipsis,1:4,:],B),ucon)
bi = (B+np.einsum('ijks,ijk->ijks',ui,bt))/ut[Ellipsis,None]
bcon = np.append(bt[Ellipsis,None],bi,axis=3)
bcov = np.einsum('ijkmn,ijkn->ijkm',grid['gcov'],bcon)
bsq = np.einsum('ijkm,ijkm->ijk',bcon,bcov)
betainv = 0.5*bsq/pg
logbetainv = np.log10(betainv)
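    # The block above rebuilds the fluid state from primitives: Lorentz factor
    # gamma = sqrt(1 + q^2) with q^2 = g_ij u^i u^j, u^t = gamma/alpha,
    # b^t = u_i B^i and b^i = (B^i + b^t u^i)/u^t, giving bsq = b_mu b^mu and
    # beta^{-1} = bsq / (2 p_gas).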
xp = xz_slice(grid['x'], patch_pole=True)
zp = xz_slice(grid['z'])
rhop = xz_slice(logrho)
betainvp = xz_slice(logbetainv)
fig = plt.figure(figsize=(16,9))
heights = [1,5]
gs = gridspec.GridSpec(nrows=2, ncols=2, height_ratios=heights, figure=fig)
ax0 = fig.add_subplot(gs[0,:])
ax0.annotate('t= '+str(t),xy=(0.5,0.5),xycoords='axes fraction',va='center',ha='center',fontsize='x-large')
ax0.axis("off")
ax1 = fig.add_subplot(gs[1,0])
rhopolplot = ax1.pcolormesh(xp, zp, rhop, cmap=cmap, vmin=vmin, vmax=vmax, shading=shading)
plotting_bfield_lines(ax1,B[Ellipsis,0],B[Ellipsis,1],nlines=40)
ax1.set_xlabel('$x (GM/c^2)$')
ax1.set_ylabel('$z (GM/c^2)$')
ax1.set_xlim(domain[:2])
ax1.set_ylim(domain[2:])
ax1.set_title('Log($\\rho$)',fontsize='large')
if bh:
circle = plt.Circle((0,0),grid['rEH'],color='k')
ax1.add_artist(circle)
ax1.set_aspect('equal')
divider = make_axes_locatable(ax1)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(rhopolplot, cax=cax)
ax2 = fig.add_subplot(gs[1,1])
betainvpolplot = ax2.pcolormesh(xp, zp, betainvp, cmap=cmap, vmin=-3, vmax=3, shading=shading)
ax2.set_xlabel('$x (GM/c^2)$')
ax2.set_ylabel('$z (GM/c^2)$')
ax2.set_xlim(domain[:2])
ax2.set_ylim(domain[2:])
ax2.set_title('Log($\\beta^{-1}$)',fontsize='large')
if bh:
circle = plt.Circle((0,0),grid['rEH'],color='k')
ax2.add_artist(circle)
ax2.set_aspect('equal')
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(betainvpolplot, cax=cax)
plt.tight_layout()
plt.savefig(os.path.join(globalvars['PLOTSDIR'],'{}_basic_plot_{:04d}.png'.format(globalvars['PROB'],dumpval)))
plt.close()
# The actual function that computes and plots diagnostics for PROB=torus and NDIMS=3
def analysis_torus3d(dumpval, cmap='jet', vmin=-5, vmax=0, domain = [-50,50,-50,50], bh=True, shading='gouraud'):
plt.clf()
print("Analyzing {0:04d} dump".format(dumpval))
dfile = h5py.File(os.path.join(globalvars['DUMPSDIR'],'dump_0000{0:04d}.h5'.format(dumpval)),'r')
rho = dfile['prims'][()][Ellipsis,0]
uu = np.array(dfile['prims'][()][Ellipsis,1])
u = np.array(dfile['prims'][()][Ellipsis,2:5])
B = np.array(dfile['prims'][()][Ellipsis,5:8])
gam = np.array(dfile['header/gam'][()])
t = dfile['t'][()]
dfile.close()
t = "{:.3f}".format(t)
logrho=np.log10(rho)
pg = (gam-1)*uu
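    # The four-velocity and magnetic four-vector reconstruction below is
    # identical to the commented block in analysis_torus2d above.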
gti = grid['gcon'][Ellipsis,0,1:4]
gij = grid['gcov'][Ellipsis,1:4,1:4]
beta_i = np.einsum('ijks,ijk->ijks',gti,grid['lapse']**2)
qsq = np.einsum('ijky,ijky->ijk',np.einsum('ijkxy,ijkx->ijky',gij,u),u)
gamma = np.sqrt(1+qsq)
ui = u-np.einsum('ijks,ijk->ijks',beta_i,gamma/grid['lapse'])
ut = gamma/grid['lapse']
ucon = np.append(ut[Ellipsis,None],ui,axis=3)
ucov = np.einsum('ijkmn,ijkn->ijkm',grid['gcov'],ucon)
bt = np.einsum('ijkm,ijkm->ijk',np.einsum('ijksm,ijks->ijkm',grid['gcov'][Ellipsis,1:4,:],B),ucon)
bi = (B+np.einsum('ijks,ijk->ijks',ui,bt))/ut[Ellipsis,None]
bcon = np.append(bt[Ellipsis,None],bi,axis=3)
bcov = np.einsum('ijkmn,ijkn->ijkm',grid['gcov'],bcon)
bsq = np.einsum('ijkm,ijkm->ijk',bcon,bcov)
betainv = 0.5*bsq/pg
logbetainv = np.log10(betainv)
xp = xz_slice(grid['x'], patch_pole=True)
zp = xz_slice(grid['z'])
rhop = xz_slice(logrho)
betainvp = xz_slice(logbetainv)
xt = xy_slice(grid['x'])
yt = xy_slice(grid['y'],patch_phi=True)
rhot = xy_slice(logrho)
betainvt = xy_slice(logbetainv)
fig = plt.figure(figsize=(16,9))
heights = [1,5,5]
gs = gridspec.GridSpec(nrows=3, ncols=2, height_ratios=heights, figure=fig)
ax0 = fig.add_subplot(gs[0,:])
ax0.annotate('t= '+str(t),xy=(0.5,0.5),xycoords='axes fraction',va='center',ha='center',fontsize='x-large')
ax0.axis("off")
ax1 = fig.add_subplot(gs[1,0])
rhopolplot = ax1.pcolormesh(xp, zp, rhop, cmap=cmap, vmin=vmin, vmax=vmax, shading=shading)
plotting_bfield_lines(ax1,B[Ellipsis,0],B[Ellipsis,1],nlines=40)
ax1.set_xlabel('$x (GM/c^2)$')
ax1.set_ylabel('$z (GM/c^2)$')
ax1.set_xlim(domain[:2])
ax1.set_ylim(domain[2:])
ax1.set_title('Log($\\rho$)',fontsize='large')
if bh:
circle = plt.Circle((0,0),grid['rEH'],color='k')
ax1.add_artist(circle)
ax1.set_aspect('equal')
divider = make_axes_locatable(ax1)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(rhopolplot, cax=cax)
ax2 = fig.add_subplot(gs[1,1])
rhotorplot = ax2.pcolormesh(xt, yt, rhot, cmap=cmap, vmin=vmin, vmax=vmax, shading=shading)
ax2.set_xlabel('$x (GM/c^2)$')
ax2.set_ylabel('$y (GM/c^2)$')
ax2.set_xlim(domain[:2])
ax2.set_ylim(domain[2:])
ax2.set_title('Log($\\rho$)',fontsize='large')
if bh:
circle = plt.Circle((0,0),grid['rEH'],color='k')
ax2.add_artist(circle)
ax2.set_aspect('equal')
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(rhotorplot, cax=cax)
ax3 = fig.add_subplot(gs[2,0])
betainvpolplot = ax3.pcolormesh(xp, zp, betainvp, cmap=cmap, vmin=-3, vmax=3, shading=shading)
ax3.set_xlabel('$x (GM/c^2)$')
ax3.set_ylabel('$z (GM/c^2)$')
ax3.set_xlim(domain[:2])
ax3.set_ylim(domain[2:])
ax3.set_title('Log($\\beta^{-1}$)',fontsize='large')
if bh:
circle = plt.Circle((0,0),grid['rEH'],color='k')
ax3.add_artist(circle)
ax3.set_aspect('equal')
divider = make_axes_locatable(ax3)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(betainvpolplot, cax=cax)
ax4 = fig.add_subplot(gs[2,1])
betainvtorplot = ax4.pcolormesh(xt, yt, betainvt, cmap=cmap, vmin=-3, vmax=3, shading=shading)
ax4.set_xlabel('$x (GM/c^2)$')
ax4.set_ylabel('$y (GM/c^2)$')
ax4.set_xlim(domain[:2])
ax4.set_ylim(domain[2:])
ax4.set_title('Log($\\beta^{-1}$)',fontsize='large')
if bh:
circle = plt.Circle((0,0),grid['rEH'],color='k')
ax4.add_artist(circle)
ax4.set_aspect('equal')
divider = make_axes_locatable(ax4)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(betainvtorplot, cax=cax)
plt.tight_layout()
plt.savefig(os.path.join(globalvars['PLOTSDIR'],'{}_basic_plot_{:04d}.png'.format(globalvars['PROB'],dumpval)))
plt.close()
# main(): Reads param file, writes grid dict and calls analysis function
if __name__=="__main__":
if len(sys.argv) > 1 and sys.argv[1]=='-p':
fparams_name = sys.argv[2]
else:
sys.exit('No param file provided')
# Reading the param file
with open(fparams_name,'r') as fparams:
lines = fparams.readlines()
for line in lines:
if line[0]=='#' or line.isspace(): pass
elif line.split()[0] in globalvars_keys: globalvars[line.split()[0]]=line.split()[-1]
# Creating the output directory if it doesn't exist
if not os.path.exists(globalvars['PLOTSDIR']):
os.makedirs(globalvars['PLOTSDIR'])
# Calculating total dump files
    dump_files = sorted(filter(lambda dump: 'dump' in dump, os.listdir(globalvars['DUMPSDIR'])))
    dstart = int(dump_files[0][-7:-3])
    dend = int(dump_files[-1][-7:-3])
dlist = range(dstart,dend+1)
Ndumps = dend-dstart+1
# Setting grid dict
gfile = h5py.File(os.path.join(globalvars['DUMPSDIR'],'grid.h5'),'r')
dfile = h5py.File(os.path.join(globalvars['DUMPSDIR'],'dump_0000{0:04d}.h5'.format(dstart)),'r')
grid['n1'] = dfile['/header/n1'][()]; grid['n2'] = dfile['/header/n2'][()]; grid['n3'] = dfile['/header/n3'][()]
grid['dx1'] = dfile['/header/geom/dx1'][()]; grid['dx2'] = dfile['/header/geom/dx2'][()]; grid['dx3'] = dfile['/header/geom/dx3'][()]
grid['startx1'] = dfile['header/geom/startx1'][()]; grid['startx2'] = dfile['header/geom/startx2'][()]; grid['startx3'] = dfile['header/geom/startx3'][()]
grid['metric'] = dfile['header/metric'][()].decode('UTF-8')
if grid['metric']=='MKS' or grid['metric']=='MMKS':
try:
grid['a'] = dfile['header/geom/mks/a'][()]
except KeyError:
grid['a'] = dfile['header/geom/mmks/a'][()]
        # The horizon radius key differs between dump versions; use whichever exists
        for rEH_key in ['header/geom/mks/Reh', 'header/geom/mks/r_eh',
                        'header/geom/mmks/Reh', 'header/geom/mmks/r_eh']:
            try:
                grid['rEH'] = dfile[rEH_key][()]
            except KeyError:
                pass
try:
grid['hslope'] = dfile['header/geom/mks/hslope'][()]
except KeyError:
grid['hslope'] = dfile['header/geom/mmks/hslope'][()]
if grid['metric']=='MMKS':
grid['mks_smooth'] = dfile['header/geom/mmks/mks_smooth'][()]
grid['poly_alpha'] = dfile['header/geom/mmks/poly_alpha'][()]
grid['poly_xt'] = dfile['header/geom/mmks/poly_xt'][()]
grid['D'] = (np.pi*grid['poly_xt']**grid['poly_alpha'])/(2*grid['poly_xt']**grid['poly_alpha']+(2/(1+grid['poly_alpha'])))
grid['x1'] = gfile['X1'][()]; grid['x2'] = gfile['X2'][()]; grid['x3'] = gfile['X3'][()]
grid['r'] = gfile['r'][()]; grid['th'] = gfile['th'][()]; grid['phi'] = gfile['phi'][()]
grid['x'] = gfile['X'][()]; grid['y'] = gfile['Y'][()]; grid['z'] = gfile['Z'][()]
grid['gcov'] = gfile['gcov'][()]; grid['gcon'] = gfile['gcon'][()]
grid['gdet'] = gfile['gdet'][()]
grid['lapse'] = gfile['lapse'][()]
dfile.close()
gfile.close()
ncores = psutil.cpu_count(logical=True)
    pad = 0.25  # fraction of logical cores to devote to worker threads
    nthreads = int(ncores*pad)
    print("Number of threads: {0:03d}".format(nthreads))
# Calling analysis function for mhdmodes
if globalvars['PROB']=='mhdmodes':
run_parallel(analysis_mhdmodes,dlist,nthreads)
# Calling analysis function for bondi
if globalvars['PROB']=='bondi':
if globalvars['NDIMS']=='2':
run_parallel(analysis_bondi,dlist,nthreads)
else:
            print('Bondi problem requires NDIMS=2')
# Calling analysis function for torus2d
if globalvars['PROB']=='torus' and globalvars['NDIMS']=='2':
run_parallel(analysis_torus2d,dlist,nthreads)
# Calling analysis function for torus3d
if globalvars['PROB']=='torus' and globalvars['NDIMS']=='3':
run_parallel(analysis_torus3d,dlist,nthreads)
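# Usage sketch (hypothetical paths; keys must match globalvars_keys above):
#   $ python <this_script>.py -p params.txt
# where params.txt might contain:
#   PROB torus
#   NDIMS 3
#   DUMPSDIR /path/to/dumps
#   PLOTSDIR /path/to/plots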
| 19,206 | 39.521097 | 193 | py |
iharm3d | iharm3d-master/script/analysis/misc/find-dt.py |
#!/usr/bin/env python
from hdf5_to_dict import load_geom, load_hdr
import sys
import math
import numpy as np
hdr = load_hdr(sys.argv[1])
geom = load_geom(sys.argv[2])
nnodes = int(sys.argv[3])
tf = float(sys.argv[4])
SMALL = 1e-20
dt_light_min = 1./SMALL
N1, N2, N3 = hdr['n1'], hdr['n2'], hdr['n3']
dx = [ 0, hdr['dx1'],hdr['dx2'], hdr['dx3'] ]
dt_light = np.zeros((N1,N2))
for i in range(N1):
for j in range(N2):
dt_light[i,j] = 1.e30
light_phase_speed = SMALL
dt_light_local = 0.
for mu in range(1,4):
if(math.pow(geom['gcon'][i,j,0,mu], 2.) -
geom['gcon'][i,j,mu,mu]*geom['gcon'][i,j,0,0] >= 0.):
cplus = np.fabs((-geom['gcon'][i,j,0,mu] +
np.sqrt(math.pow(geom['gcon'][i,j,0,mu], 2.) -
geom['gcon'][i,j,mu,mu]*geom['gcon'][i,j,0,0]))/
(geom['gcon'][i,j,0,0]))
cminus = np.fabs((-geom['gcon'][i,j,0,mu] -
np.sqrt(math.pow(geom['gcon'][i,j,0,mu], 2.) -
geom['gcon'][i,j,mu,mu]*geom['gcon'][i,j,0,0]))/
(geom['gcon'][i,j,0,0]))
                light_phase_speed = max([cplus, cminus])
            else:
                light_phase_speed = SMALL
            dt_light_local += 1./(dx[mu]/light_phase_speed)
if (dx[mu]/light_phase_speed < dt_light[i,j]):
dt_light[i,j] = dx[mu]/light_phase_speed
dt_light_local = 1./dt_light_local
if (dt_light_local < dt_light_min):
dt_light_min = dt_light_local
print("bhlight min is", dt_light_min)
# print("directional min is", np.min(dt_light))
tstep = 0.9*dt_light_min
print("timestep is then", tstep)
size = N1*N2*N3/nnodes
zcps = 813609*np.log(size) - 6327477
print("zcps per node is", zcps, ", total is", zcps*nnodes)
wall_per_step = (N1*N2*N3)/(zcps*nnodes)
print("walltime per step is", wall_per_step)
print("total time is", tf/tstep*wall_per_step/3600, " hours")
| 1,852 | 26.656716 | 61 | py |
iharm3d | iharm3d-master/script/analysis/misc/ana_from_log.py |
# Pass along HARM's own diagnostics for comparison
# TODO implement
#diag = io.load_log(path)
#out_full['t_d'] = diag['t']
#out_full['Mdot_d'] = diag['mdot']
#out_full['Phi_d'] = diag['Phi']
#out_full['Ldot_d'] = diag['ldot']
#out_full['Edot_d'] = diag['edot']
#out_full['Lum_d'] = diag['lum_eht']
#out_full['divbmax_d'] = diag['divbmax']
| 341 | 25.307692 | 50 | py |
CTDL | CTDL-master/RunGridWorld.py |
from GridWorld.Functions.RandomSeedSweep import RunRandomSeedSweep
from GridWorld.Functions.MazeTypeSweep import RunMazeTypeSweep
from GridWorld.Functions.RevaluationSweep import RunRevaluationSweep
RunRandomSeedSweep()
| 220 | 43.2 | 68 |
py
|
CTDL
|
CTDL-master/RunGym.py
|
from Gym.Functions.RunTask import RunTask
RunTask()
| 54 | 10 | 41 |
py
|
CTDL
|
CTDL-master/Utilities.py
|
import os
import numpy as np
import shutil
def RecordSettings(directory, maze_params, agent_params):
file = open(directory + 'Settings.txt', 'w')
for key, value in maze_params.items():
file.write(key + ': ' + str(value) + '\n')
for key, value in agent_params.items():
file.write(key + ': ' + str(value) + '\n')
file.close()
return
def RecordSetting(directory, key, value):
file = open(directory + 'Settings.txt', 'a')
file.write(key + ': ' + str(value) + '\n')
file.close()
return
| 538 | 23.5 | 57 |
py
|
CTDL
|
CTDL-master/Gym/Parameters.py
|
from Gym.Enums.Enums import EnvType, AgentType
env_params = {'env': EnvType.MountainCarContinuous,
'num_trials': 200,
'max_steps': 1000,
'num_repeats': 50
}
agent_params = {'agent_type': AgentType.CTDL_A2C,
'bSOM': True,
'SOM_alpha': .01,
'SOM_sigma': .1,
'SOM_sigma_const': .1,
'Q_alpha': .9,
'w_decay': 10,
'TD_decay': 1,
'SOM_size': 15,
'e_trials': 200
}
| 575 | 29.315789 | 51 |
py
|
CTDL
|
CTDL-master/Gym/AnalyseResults.py
|
from Gym.Functions.Parsers import ParseIntoDataframes
from Gym.Functions.Plotters import PlotComparisons
dir = 'ContinuousMountainCar'
to_compare = ['CTDL_A2C', 'A2C']
data_frames, labels = ParseIntoDataframes(dir, to_compare)
PlotComparisons(data_frames, labels)
| 267 | 25.8 | 58 |
py
|
CTDL
|
CTDL-master/Gym/Agents/A2C/Agent.py
|
import matplotlib.pyplot as plt
import numpy as np
import pickle
import sklearn.preprocessing
from Gym.Agents.A2C.ACGraph import ACGraph
class Agent(object):
def __init__(self, directory, env_params, agent_params):
self.directory = directory
self.action_maxs = env_params['action_maxs']
self.action_mins = env_params['action_mins']
self.input_dim = env_params['state_dim']
self.ac_graph = ACGraph(self.input_dim, self.action_mins, self.action_maxs, self.directory)
self.ac_graph.SaveGraphAndVariables()
self.discount_factor = 0.99
self.epsilon = 1
self.results = {'rewards': [], 'lengths': []}
self.trial_reward = 0
self.trial_length = 0
self.plot_num = 0
self.prev_state = None
self.prev_action = None
self.bStart_learning = False
state_space_samples = np.array(
[env_params['env_obj'].observation_space.sample() for x in range(10000)])
self.scaler = sklearn.preprocessing.StandardScaler()
self.scaler.fit(state_space_samples)
return
def ScaleState(self, state):
scaled = self.scaler.transform([state])
return scaled
def Update(self, reward, state, bTrial_over):
state = self.ScaleState(np.squeeze(state))
self.RecordResults(bTrial_over, reward)
if (self.bStart_learning):
self.UpdateACGraph(reward, state, bTrial_over)
action = self.SelectAction(state)
if (not self.bStart_learning):
self.bStart_learning = True
return action
def UpdateACGraph(self, reward, state, bTrial_over):
state_value = self.ac_graph.GetStateValue(state)
prev_state_value = self.ac_graph.GetStateValue(self.prev_state)
if(bTrial_over):
target = reward
else:
target = reward + self.discount_factor * np.squeeze(state_value)
delta = target - prev_state_value
self.ac_graph.GradientDescentStep(self.prev_state, self.prev_action, target, delta)
return
def RecordResults(self, bTrial_over, reward):
self.trial_reward += reward
self.trial_length += 1
if (bTrial_over):
self.results['rewards'].append(self.trial_reward)
print('Cumulative Episode Reward: ' + str(self.trial_reward))
self.trial_reward = 0
self.results['lengths'].append(self.trial_length)
self.trial_length = 0
return
def SelectAction(self, state):
action = self.ac_graph.GetAction(state)
self.prev_action = action
self.prev_state = np.copy(state)
return action
def PlotResults(self):
plt.figure()
plt.plot(self.results['rewards'])
plt.savefig(self.directory + 'AgentTrialRewards.pdf')
plt.close()
with open(self.directory + 'Results.pkl', 'wb') as handle:
pickle.dump(self.results, handle, protocol=pickle.HIGHEST_PROTOCOL)
return
| 3,046 | 26.7 | 99 | py |
CTDL | CTDL-master/Gym/Agents/A2C/ACGraph.py |
import os
import tensorflow as tf
import numpy as np
class ACGraph(object):
def __init__(self, input_dim, action_mins, action_maxs, directory):
self.ti = 0
self.input_dim = input_dim
self.action_mins = action_mins
self.action_maxs = action_maxs
self.action_dim = action_mins.shape[0]
self.directory = directory
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
tf.reset_default_graph()
self.graph = tf.Graph()
with self.graph.as_default():
""" Construction phase """
self.init_xavier = tf.contrib.layers.xavier_initializer()
self.X = tf.placeholder(tf.float32, shape=(None, self.input_dim), name="X")
self.action_y = tf.placeholder(tf.float32, shape=(None, self.action_dim), name="action_y")
self.value_y = tf.placeholder(tf.float32, shape=(None), name="value_y")
self.delta = tf.placeholder(tf.float32, shape=(None), name="delta")
# Layers
self.dense1_val = tf.layers.dense(inputs=self.X, units=128, activation=tf.nn.elu, kernel_initializer=self.init_xavier)
self.dense2_val = tf.layers.dense(inputs=self.dense1_val, units=128, activation=tf.nn.elu, kernel_initializer=self.init_xavier)
self.state_value = tf.layers.dense(inputs=self.dense2_val, units=1, activation=None, kernel_initializer=self.init_xavier)
self.dense1_pol = tf.layers.dense(inputs=self.X, units=128, activation=tf.nn.elu, kernel_initializer=self.init_xavier)
self.dense2_pol = tf.layers.dense(inputs=self.dense1_pol, units=128, activation=tf.nn.elu, kernel_initializer=self.init_xavier)
self.action_means = tf.layers.dense(inputs=self.dense2_pol, units=self.action_dim, activation=None, kernel_initializer=self.init_xavier)
self.action_sigmas = tf.nn.softplus(tf.layers.dense(inputs=self.dense2_pol, units=self.action_dim, activation=None, kernel_initializer=self.init_xavier))
self.dist = tf.contrib.distributions.Normal(loc=self.action_means, scale=self.action_sigmas)
self.action_sample = tf.squeeze(self.dist.sample(1), axis=0)
self.action = tf.clip_by_value(self.action_sample, self.action_mins[0], self.action_maxs[0])
# Loss functions
with tf.name_scope("loss"):
self.policy_loss = -tf.log(self.dist.prob(self.action_y) + 1e-5) * self.delta
self.value_loss = tf.reduce_mean(tf.square(self.value_y - self.state_value), axis=0, name='value_loss')
# Minimizer
self.learning_rate_policy = 0.00001
self.learning_rate_value = 0.0001
with tf.name_scope("train"):
self.training_op_policy = tf.train.AdamOptimizer(self.learning_rate_policy, name='optimizer').minimize(self.policy_loss)
self.training_op_value = tf.train.AdamOptimizer(self.learning_rate_value, name='optimizer').minimize(self.value_loss)
self.init = tf.global_variables_initializer()
self.saver = tf.train.Saver()
tf.add_to_collection('action', self.action)
tf.add_to_collection('state_value', self.state_value)
self.sess = tf.Session(graph=self.graph)
self.sess.run(self.init)
return
def GetAction(self, X):
action = self.action.eval(feed_dict={self.X: X}, session=self.sess)
return action
def GetStateValue(self, X):
value = self.state_value.eval(feed_dict={self.X: X}, session=self.sess)
return value
def GradientDescentStep(self, X_batch, action_batch, value_batch, delta_batch):
self.sess.run(self.training_op_policy, feed_dict={self.X: X_batch,
self.action_y: action_batch,
self.delta: np.squeeze(delta_batch)})
self.sess.run(self.training_op_value, feed_dict={self.X: X_batch, self.value_y: np.squeeze(value_batch)})
return
def SaveGraphAndVariables(self):
save_path = self.saver.save(self.sess, self.directory)
print('Model saved in ' + save_path)
return
def LoadGraphAndVariables(self):
self.saver.restore(self.sess, self.directory)
print('Model loaded from ' + self.directory)
return
| 4,390 | 40.819048 | 165 | py |
CTDL | CTDL-master/Gym/Agents/DQN/Minibatch.py |
class MiniBatch(object):
def __init__(self):
self.prev_states = []
self.actions = []
self.rewards = []
self.states = []
self.bTrial_over = []
| 187 | 19.888889 | 29 | py |
CTDL | CTDL-master/Gym/Agents/DQN/Memory.py |
import numpy as np
from Gym.Agents.DQN.Minibatch import MiniBatch
class Memory(object):
def __init__(self):
self.capacity = 100000
self.prev_states = []
self.states = []
self.actions = []
self.rewards = []
self.bTrial_over = []
return
def RecordExperience(self, prev_state, state, action, reward, bTrial_over):
self.prev_states.append(prev_state)
self.states.append(state)
self.rewards.append(reward)
self.bTrial_over.append(bTrial_over)
self.actions.append(action)
if(self.rewards.__len__() > self.capacity):
del self.prev_states[0]
del self.states[0]
del self.actions[0]
del self.rewards[0]
del self.bTrial_over[0]
return
def GetMinibatch(self, minibatch_size):
minibatch = MiniBatch()
experience_indices = np.random.randint(0, self.rewards.__len__(), minibatch_size)
prev_states = []
actions = []
rewards = []
states = []
bTrial_over = []
for i in experience_indices:
prev_states.append(self.prev_states[i])
actions.append(self.actions[i])
rewards.append(self.rewards[i])
states.append(self.states[i])
bTrial_over.append(self.bTrial_over[i])
minibatch.prev_states = np.squeeze(np.array(prev_states, dtype=float))
minibatch.actions = np.array(actions, dtype=int)
minibatch.rewards = np.array(rewards, dtype=float)
minibatch.states = np.squeeze(np.array(states, dtype=float))
minibatch.bTrial_over = bTrial_over
return minibatch
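# Usage sketch (hypothetical transition values): the buffer is FIFO-bounded at
# `capacity` and GetMinibatch samples indices uniformly *with* replacement:
#   memory = Memory()
#   memory.RecordExperience(prev_s, s, a, r, False)
#   batch = memory.GetMinibatch(32)  # fields come back as squeezed numpy arrays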
| 1,704 | 26.063492 | 89 | py |
CTDL | CTDL-master/Gym/Agents/DQN/Agent.py |
import matplotlib.pyplot as plt
import numpy as np
import pickle
from Gym.Agents.DQN.Memory import Memory
from Gym.Agents.DQN.QTargetGraph import QTargetGraph
from Gym.Agents.DQN.QGraph import QGraph
class Agent(object):
def __init__(self, directory, env_params, agent_params):
self.directory = directory
self.num_actions = env_params['num_actions']
self.input_dim = env_params['state_dim']
self.minibatch_size = 32
self.q_graph = QGraph(self.input_dim, self.num_actions, self.directory)
self.q_graph.SaveGraphAndVariables()
self.q_target_graph = QTargetGraph(self.directory)
self.memory = Memory()
self.discount_factor = 0.99
self.epsilon = 0
self.final_epsilon = .9
self.num_epsilon_trials = agent_params['e_trials']
self.epsilon_increment = self.final_epsilon / self.num_epsilon_trials
self.c = 500
self.ci = 0
self.results = {'rewards': [], 'lengths': []}
self.trial_reward = 0
self.trial_length = 0
self.plot_num = 0
self.prev_state = None
self.prev_action = None
self.bStart_learning = False
return
def Update(self, reward, state, bTrial_over):
state = np.expand_dims(state, axis=0)
if (bTrial_over and self.epsilon < self.final_epsilon):
self.epsilon += self.epsilon_increment
self.RecordResults(bTrial_over, reward)
if(self.bStart_learning):
self.memory.RecordExperience(self.prev_state, state, self.prev_action, reward, bTrial_over)
self.UpdateQGraph()
action = self.SelectAction(state)
if (not self.bStart_learning):
self.bStart_learning = True
return action
def RecordResults(self, bTrial_over, reward):
self.trial_reward += reward
self.trial_length += 1
if (bTrial_over):
self.results['rewards'].append(self.trial_reward)
self.trial_reward = 0
self.results['lengths'].append(self.trial_length)
self.trial_length = 0
return
def UpdateQGraph(self):
self.ci += 1
if(self.ci >= self.c):
print('Loading New target Graph')
self.ci = 0
self.q_graph.SaveGraphAndVariables()
self.q_target_graph = QTargetGraph(self.directory)
minibatch = self.memory.GetMinibatch(self.minibatch_size)
max_action_values = np.amax(np.squeeze(np.array(self.q_target_graph.GetActionValues(minibatch.states))), axis=1)
targets = np.zeros(minibatch.rewards.__len__())
for i in range(targets.shape[0]):
if(minibatch.bTrial_over[i]):
targets[i] = minibatch.rewards[i]
else:
targets[i] = minibatch.rewards[i] + (max_action_values[i] * self.discount_factor)
self.q_graph.GradientDescentStep(minibatch.prev_states, minibatch.actions, targets)
return
def SelectAction(self, state):
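        # NB: epsilon is used here as the probability of acting *greedily* and
        # anneals upwards towards final_epsilon, the reverse of the usual
        # epsilon-greedy convention.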
if(np.random.rand() > self.epsilon):
action = np.random.randint(self.num_actions)
else:
action = np.argmax(np.squeeze(np.array(self.q_graph.GetActionValues(state))))
self.prev_action = action
self.prev_state = np.copy(state)
return action
def PlotResults(self):
plt.figure()
plt.plot(self.results['rewards'])
plt.savefig(self.directory + 'AgentTrialRewards.pdf')
plt.close()
with open(self.directory + 'Results.pkl', 'wb') as handle:
pickle.dump(self.results, handle, protocol=pickle.HIGHEST_PROTOCOL)
return
| 3,692 | 27.627907 | 120 | py |
CTDL | CTDL-master/Gym/Agents/DQN/QTargetGraph.py |
import tensorflow as tf
import numpy as np
class QTargetGraph(object):
def __init__(self, directory):
tf.reset_default_graph()
self.graph = tf.Graph()
with self.graph.as_default():
saver = tf.train.import_meta_graph(directory + ".meta")
self.logits = tf.get_collection('logits')
self.sess = tf.Session(graph=self.graph)
saver.restore(self.sess, directory)
def GetActionValues(self, X):
preds = self.sess.run(self.logits, feed_dict={'X:0': X})
return preds
| 563 | 15.588235 | 67 | py |
CTDL | CTDL-master/Gym/Agents/DQN/QGraph.py |
import os
import tensorflow as tf
import numpy as np
class QGraph(object):
def __init__(self, input_dim, num_actions, directory):
self.ti = 0
self.input_dim = input_dim
self.num_actions = num_actions
self.directory = directory
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
tf.reset_default_graph()
self.graph = tf.Graph()
with self.graph.as_default():
""" Construction phase """
self.X = tf.placeholder(tf.float32, shape=(None, self.input_dim), name="X")
self.y = tf.placeholder(tf.float32, shape=(None), name="y")
self.actions = tf.placeholder(tf.float32, shape=[None, self.num_actions], name="actions")
# Layers
self.dense1 = tf.layers.dense(inputs=self.X, units=128, activation=tf.nn.relu)
self.dense2 = tf.layers.dense(inputs=self.dense1, units=128, activation=tf.nn.relu)
self.logits = tf.layers.dense(inputs=self.dense2, units=self.num_actions)
# Loss function
with tf.name_scope("loss"):
self.predictions = tf.reduce_sum(tf.multiply(self.logits, self.actions), 1)
self.targets = tf.stop_gradient(self.y)
self.error = self.targets - self.predictions
self.clipped_error = tf.clip_by_value(self.targets - self.predictions, -1., 1.)
self.loss = tf.reduce_mean(tf.multiply(self.error, self.clipped_error), axis=0, name='loss')
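                # error * clip(error, -1, 1) equals error^2 for |error| <= 1 and
                # |error| otherwise, a Huber-style loss that bounds the gradient.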
# Minimizer
self.learning_rate = 0.00025
self.momentum = 0.95
self.epsilon = 0.01
self.batch_size = 32
with tf.name_scope("train"):
self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate, momentum=self.momentum, epsilon=self.epsilon)
self.training_op = self.optimizer.minimize(self.loss)
self.init = tf.global_variables_initializer()
self.saver = tf.train.Saver()
tf.add_to_collection('logits', self.logits)
self.sess = tf.Session(graph=self.graph)
self.sess.run(self.init)
return
def GetActionValues(self, X):
preds = self.logits.eval(feed_dict={self.X: X}, session=self.sess)
return preds
def GradientDescentStep(self, X_batch, action_batch, y_batch):
# One hot encoded action tensor
actions = np.zeros((self.batch_size, self.num_actions))
for i in range(self.batch_size):
actions[i, action_batch[i]] = 1
self.sess.run(self.training_op,
feed_dict={self.X: X_batch, self.y: y_batch, self.actions: actions})
return
def SaveGraphAndVariables(self):
save_path = self.saver.save(self.sess, self.directory)
print('Model saved in ' + save_path)
return
def LoadGraphAndVariables(self):
self.saver.restore(self.sess, self.directory)
print('Model loaded from ' + self.directory)
return
| 3,036 | 33.123596 | 138 | py |
CTDL | CTDL-master/Gym/Agents/CTDL_A2C/Agent.py |
import matplotlib.pyplot as plt
import numpy as np
import pickle
import sklearn.preprocessing
from Gym.Agents.A2C.ACGraph import ACGraph
from Gym.Agents.CTDL_A2C.SOM import DeepSOM
class Agent(object):
def __init__(self, directory, env_params, agent_params):
self.bSOM = agent_params['bSOM']
self.directory = directory
self.action_maxs = env_params['action_maxs']
self.action_mins = env_params['action_mins']
self.input_dim = env_params['state_dim']
self.ac_graph = ACGraph(self.input_dim, self.action_mins, self.action_maxs, self.directory)
self.ac_graph.SaveGraphAndVariables()
if (self.bSOM):
self.CreateSOM(agent_params)
self.weighting_decay = agent_params['w_decay']
self.TD_decay = agent_params['TD_decay']
self.discount_factor = 0.99
self.epsilon = 1
self.results = {'rewards': [], 'lengths': []}
self.trial_reward = 0
self.trial_length = 0
self.plot_num = 0
self.prev_state = None
self.prev_action = None
self.prev_Vvalue = None
self.bStart_learning = False
state_space_samples = np.array(
[env_params['env_obj'].observation_space.sample() for x in range(10000)])
self.scaler = sklearn.preprocessing.StandardScaler()
self.scaler.fit(state_space_samples)
return
def CreateSOM(self, agent_params):
self.SOM = DeepSOM(self.directory, self.input_dim, agent_params['SOM_size'],
agent_params['SOM_alpha'], agent_params['SOM_sigma'],
agent_params['SOM_sigma_const'])
self.V_alpha = agent_params['Q_alpha']
self.VValues = np.zeros((agent_params['SOM_size'] * agent_params['SOM_size']))
return
def ScaleState(self, state):
scaled = self.scaler.transform([state])
return scaled
def Update(self, reward, state, bTrial_over):
state = self.ScaleState(np.squeeze(state))
self.RecordResults(bTrial_over, reward)
if (self.bStart_learning):
self.UpdateACGraph(bTrial_over, reward, state)
action = self.SelectAction(state)
if (not self.bStart_learning):
self.bStart_learning = True
return action
def RecordResults(self, bTrial_over, reward):
self.trial_reward += reward
self.trial_length += 1
if (bTrial_over):
self.results['rewards'].append(self.trial_reward)
print('Cumulative Episode Reward: ' + str(self.trial_reward))
self.trial_reward = 0
self.results['lengths'].append(self.trial_length)
self.trial_length = 0
return
def GetWeighting(self, best_unit, state):
diff = np.sum(np.square(self.SOM.SOM_layer.units['w'][best_unit, :] - state))
w = np.exp(-diff / self.weighting_decay)
return w
def GetVValues(self, state, critic_value):
best_unit = self.SOM.GetOutput(state)
som_value = self.VValues[best_unit]
w = self.GetWeighting(best_unit, state)
state_value = (w * som_value) + ((1 - w) * critic_value)
return state_value
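    # Blending sketch: GetWeighting returns w = exp(-||w_u* - state||^2 / w_decay),
    # so states near a memorised SOM prototype lean on the tabular value while
    # novel states fall back to the network critic.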
def UpdateACGraph(self, bTrial_over, reward, state):
prev_state_value = self.ac_graph.GetStateValue(self.prev_state)
target = self.GetTargetValue(bTrial_over, reward, state)
delta = target - prev_state_value
self.ac_graph.GradientDescentStep(self.prev_state, self.prev_action, target, delta)
if (self.bSOM):
self.UpdateSOM(target)
return
def UpdateSOM(self, target):
prev_best_unit = self.SOM.GetOutput(self.prev_state)
delta = np.exp(np.abs(target -
np.squeeze(self.ac_graph.GetStateValue(
self.prev_state))) / self.TD_decay) - 1
delta = np.clip(delta, 0, 1)
self.SOM.Update(self.prev_state, prev_best_unit, delta)
prev_best_unit = self.SOM.GetOutput(self.prev_state)
w = self.GetWeighting(prev_best_unit, self.prev_state)
self.VValues[prev_best_unit] += self.V_alpha * w * (
target - self.VValues[prev_best_unit])
return
def GetTargetValue(self, bTrial_over, reward, state):
critic_value = np.squeeze(np.array(self.ac_graph.GetStateValue(state)))
if(self.bSOM):
state_value = self.GetVValues(state, critic_value)
else:
state_value = critic_value
if (bTrial_over):
target = reward
else:
target = reward + (state_value * self.discount_factor)
return target
def SelectAction(self, state):
action = self.ac_graph.GetAction(state)
self.prev_action = action
self.prev_state = np.copy(state)
return action
def PlotResults(self):
plt.switch_backend('agg')
plt.figure()
plt.plot(self.results['rewards'])
plt.savefig(self.directory + 'AgentTrialRewards.pdf')
plt.close()
with open(self.directory + 'Results.pkl', 'wb') as handle:
pickle.dump(self.results, handle, protocol=pickle.HIGHEST_PROTOCOL)
return
| 5,266 | 28.757062 | 99 | py |
CTDL | CTDL-master/Gym/Agents/CTDL_A2C/SOM.py |
from Gym.Agents.CTDL.SOMLayer import SOMLayer
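# Note: this imports the SOMLayer from Gym.Agents.CTDL (rand-initialised
# weights) rather than the CTDL_A2C/SOMLayer.py variant defined alongside,
# which initialises with randn.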
class DeepSOM(object):
def __init__(self, directory, input_dim, map_size, learning_rate, sigma, sigma_const):
self.directory = directory
self.SOM_layer = SOMLayer(input_dim, map_size, learning_rate, sigma, sigma_const)
return
def Update(self, state, best_unit, reward_value):
self.SOM_layer.Update(state, best_unit, reward_value)
return
def GetOutput(self, state):
best_unit = self.SOM_layer.GetBestUnit(state)
return best_unit
| 553 | 22.083333 | 90 | py |
CTDL | CTDL-master/Gym/Agents/CTDL_A2C/ACGraph.py |
import os
import tensorflow as tf
import numpy as np
class ACGraph(object):
def __init__(self, input_dim, action_mins, action_maxs, directory):
self.ti = 0
self.input_dim = input_dim
self.action_mins = action_mins
self.action_maxs = action_maxs
self.action_dim = action_mins.shape[0]
self.directory = directory
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
tf.reset_default_graph()
self.graph = tf.Graph()
with self.graph.as_default():
""" Construction phase """
self.init_xavier = tf.contrib.layers.xavier_initializer()
self.X = tf.placeholder(tf.float32, shape=(None, self.input_dim), name="X")
self.action_y = tf.placeholder(tf.float32, shape=(None, self.action_dim), name="action_y")
self.value_y = tf.placeholder(tf.float32, shape=(None), name="value_y")
self.delta = tf.placeholder(tf.float32, shape=(None), name="delta")
# Layers
self.dense1_val = tf.layers.dense(inputs=self.X, units=128, activation=tf.nn.elu, kernel_initializer=self.init_xavier)
self.dense2_val = tf.layers.dense(inputs=self.dense1_val, units=128, activation=tf.nn.elu, kernel_initializer=self.init_xavier)
self.state_value = tf.layers.dense(inputs=self.dense2_val, units=1, activation=None, kernel_initializer=self.init_xavier)
self.dense1_pol = tf.layers.dense(inputs=self.X, units=128, activation=tf.nn.elu, kernel_initializer=self.init_xavier)
self.dense2_pol = tf.layers.dense(inputs=self.dense1_pol, units=128, activation=tf.nn.elu, kernel_initializer=self.init_xavier)
self.action_means = tf.layers.dense(inputs=self.dense2_pol, units=self.action_dim, activation=None, kernel_initializer=self.init_xavier)
self.action_sigmas = tf.nn.softplus(tf.layers.dense(inputs=self.dense2_pol, units=self.action_dim, activation=None, kernel_initializer=self.init_xavier))
self.dist = tf.contrib.distributions.Normal(loc=self.action_means, scale=self.action_sigmas)
self.action_sample = tf.squeeze(self.dist.sample(1), axis=0)
self.action = tf.clip_by_value(self.action_sample, self.action_mins[0], self.action_maxs[0])
# Loss functions
with tf.name_scope("loss"):
                self.policy_loss = (-tf.log(self.dist.prob(self.action_y) + 1e-5) * self.delta)  # entropy bonus omitted
self.value_loss = tf.reduce_mean(tf.square(self.value_y - self.state_value), axis=0, name='value_loss')
# Minimizer
self.learning_rate_policy = 0.00001
self.learning_rate_value = 0.0001
with tf.name_scope("train"):
self.training_op_policy = tf.train.AdamOptimizer(self.learning_rate_policy, name='optimizer').minimize(self.policy_loss)
self.training_op_value = tf.train.AdamOptimizer(self.learning_rate_value, name='optimizer').minimize(self.value_loss)
self.init = tf.global_variables_initializer()
self.saver = tf.train.Saver()
tf.add_to_collection('action', self.action)
tf.add_to_collection('state_value', self.state_value)
self.sess = tf.Session(graph=self.graph)
self.sess.run(self.init)
return
def GetAction(self, X):
action = self.action.eval(feed_dict={self.X: X}, session=self.sess)
return action
def GetStateValue(self, X):
value = self.state_value.eval(feed_dict={self.X: X}, session=self.sess)
return value
def GradientDescentStep(self, X_batch, action_batch, value_batch, delta_batch):
self.sess.run(self.training_op_policy, feed_dict={self.X: X_batch,
self.action_y: action_batch,
self.delta: np.squeeze(delta_batch)})
self.sess.run(self.training_op_value, feed_dict={self.X: X_batch, self.value_y: np.squeeze(value_batch)})
return
def SaveGraphAndVariables(self):
save_path = self.saver.save(self.sess, self.directory)
print('Model saved in ' + save_path)
return
def LoadGraphAndVariables(self):
self.saver.restore(self.sess, self.directory)
print('Model loaded from ' + self.directory)
return
| 4,408 | 40.990476 | 165 | py |
CTDL | CTDL-master/Gym/Agents/CTDL_A2C/SOMLayer.py |
import numpy as np
class SOMLayer():
def __init__(self, input_dim, size, learning_rate, sigma, sigma_const):
self.size = size
self.num_units = size * size
self.num_weights = input_dim
self.learning_rate = learning_rate
self.sigma = sigma
self.sigma_const = sigma_const
self.units = {'xy': [], 'w': []}
self.ConstructMap()
return
def ConstructMap(self):
x = 0
y = 0
# Construct map
for u in range(self.num_units):
self.units['xy'].append([x, y])
            self.units['w'].append(np.random.randn(self.num_weights))
x += 1
if (x >= self.size):
x = 0
y += 1
self.units['xy'] = np.array(self.units['xy'])
self.units['w'] = np.array(self.units['w'])
return
def Update(self, state, best_unit, reward_value):
diffs = self.units['xy'] - self.units['xy'][best_unit, :]
location_distances = np.sqrt(np.sum(np.square(diffs), axis=-1))
neighbourhood_values = np.exp(-np.square(location_distances) / (2.0 * (self.sigma_const + (reward_value * self.sigma))))
self.units['w'] += (reward_value * self.learning_rate) * \
np.expand_dims(neighbourhood_values, axis=-1) * (state - self.units['w'])
return
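    # Update-rule sketch: each unit u moves towards `state` by
    #   dw_u = delta * lr * exp(-d(u, u*)^2 / (2 * (sigma_const + delta * sigma))) * (state - w_u)
    # where delta is the TD-derived reward_value and u* the best-matching unit,
    # so larger TD errors both widen the neighbourhood and speed up learning.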
def GetBestUnit(self, state):
best_unit = np.argmin(np.sum((self.units['w'] - state) ** 2, axis=-1), axis=0)
return best_unit
| 1,562 | 26.421053 | 128 | py |
CTDL | CTDL-master/Gym/Agents/CTDL/Agent.py |
import matplotlib.pyplot as plt
import numpy as np
import pickle
from Gym.Agents.CTDL.QGraph import QGraph
from Gym.Agents.CTDL.SOM import DeepSOM
from Gym.Agents.CTDL.QTargetGraph import QTargetGraph
class Agent(object):
def __init__(self, directory, env_params, agent_params):
self.bSOM = agent_params['bSOM']
self.directory = directory
self.input_dim = env_params['state_dim']
self.input_min = env_params['state_mins']
self.input_max = env_params['state_maxs']
self.num_actions = env_params['num_actions']
self.q_graph = QGraph(self.input_dim, self.num_actions, self.directory)
self.q_graph.SaveGraphAndVariables()
self.q_target_graph = QTargetGraph(self.directory)
if(self.bSOM):
self.CreateSOM(agent_params)
self.weighting_decay = agent_params['w_decay']
self.TD_decay = agent_params['TD_decay']
self.discount_factor = 0.99
self.epsilon = 0
self.final_epsilon = .9
self.num_epsilon_trials = agent_params['e_trials']
self.epsilon_increment = self.final_epsilon / self.num_epsilon_trials
self.batch_size = 32
self.c = 500
self.ci = 0
self.results = {'rewards': [], 'lengths': []}
self.trial_reward = 0
self.trial_length = 0
self.plot_num = 0
self.prev_state = None
self.prev_action = None
self.prev_Qvalue = None
self.bStart_learning = False
self.state_max = np.zeros(self.input_dim)
self.state_min = np.zeros(self.input_dim)
return
def CreateSOM(self, agent_params):
self.SOM = DeepSOM(self.directory, self.input_dim, agent_params['SOM_size'],
agent_params['SOM_alpha'], agent_params['SOM_sigma'],
agent_params['SOM_sigma_const'])
self.Q_alpha = agent_params['Q_alpha']
self.QValues = np.zeros((agent_params['SOM_size'] * agent_params['SOM_size'], self.num_actions))
return
def Update(self, reward, state, bTrial_over):
for i, s in enumerate(state):
if(s > self.state_max[i]):
self.state_max[i] = s
elif (s < self.state_min[i]):
self.state_min[i] = s
state = (state - self.state_min) / (self.state_max - self.state_min)
if (bTrial_over and self.epsilon < self.final_epsilon):
self.epsilon += self.epsilon_increment
self.RecordResults(bTrial_over, reward)
if(self.bStart_learning):
self.UpdateQGraph(reward, state, bTrial_over)
action = self.SelectAction(state)
if(not self.bStart_learning):
self.bStart_learning = True
return action
def RecordResults(self, bTrial_over, reward):
self.trial_reward += reward
self.trial_length += 1
if (bTrial_over):
self.results['rewards'].append(self.trial_reward)
self.trial_reward = 0
self.results['lengths'].append(self.trial_length)
self.trial_length = 0
return
def GetWeighting(self, best_unit, state):
diff = np.sum(np.square(self.SOM.SOM_layer.units['w'][best_unit, :] - state))
w = np.exp(-diff / self.weighting_decay)
return w
def GetQValues(self, state, q_graph_values):
best_unit = self.SOM.GetOutput(state)
som_action_values = self.QValues[best_unit, :]
w = self.GetWeighting(best_unit, state)
q_values = (w * som_action_values) + ((1 - w) * q_graph_values)
return q_values
def UpdateQGraph(self, reward, state, bTrial_over):
self.ci += 1
if (self.ci >= self.c):
print('Loading New target Graph')
self.ci = 0
self.q_graph.SaveGraphAndVariables()
self.q_target_graph = QTargetGraph(self.directory)
target = self.GetTargetValue(bTrial_over, reward, state)
self.q_graph.GradientDescentStep(np.expand_dims(self.prev_state, axis=0),
np.expand_dims(self.prev_action, axis=0),
np.expand_dims(target, axis=0))
if(self.bSOM):
self.UpdateSOM(target)
return
def UpdateSOM(self, target):
prev_best_unit = self.SOM.GetOutput(self.prev_state)
delta = np.exp(np.abs(target -
np.squeeze(self.q_graph.GetActionValues(
np.expand_dims(self.prev_state, axis=0)))[self.prev_action]) / self.TD_decay) - 1
delta = np.clip(delta, 0, 1)
self.SOM.Update(self.prev_state, prev_best_unit, delta)
prev_best_unit = self.SOM.GetOutput(self.prev_state)
w = self.GetWeighting(prev_best_unit, self.prev_state)
self.QValues[prev_best_unit, self.prev_action] += self.Q_alpha * w * (target - self.QValues[prev_best_unit, self.prev_action])
self.Replay()
return
def GetTargetValue(self, bTrial_over, reward, state):
q_graph_values = np.squeeze(np.array(self.q_target_graph.GetActionValues(np.expand_dims(state, axis=0))))
if(self.bSOM):
q_values = self.GetQValues(state, q_graph_values)
else:
q_values = q_graph_values
max_q_value = np.amax(q_values)
if (bTrial_over):
target = reward
else:
target = reward + (max_q_value * self.discount_factor)
return target
def Replay(self):
units = np.random.randint(0, self.SOM.SOM_layer.num_units, self.batch_size)
actions = np.random.randint(0, self.num_actions, self.batch_size)
self.q_graph.GradientDescentStep(self.SOM.SOM_layer.units['w'][units, :], actions, self.QValues[units, actions])
return
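    # Replay sketch: random SOM units and actions are drawn and the network is
    # regressed towards the tabular QValues, distilling the SOM's memory into
    # the function approximator between environment steps.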
def SelectAction(self, state):
q_graph_values = np.squeeze(np.array(self.q_graph.GetActionValues(np.expand_dims(state, axis=0))))
if(self.bSOM):
q_values = self.GetQValues(state, q_graph_values)
else:
q_values = q_graph_values
if(np.random.rand() > self.epsilon):
action = np.random.randint(self.num_actions)
else:
action = np.argmax(q_values)
self.prev_Qvalue = q_values[action]
self.prev_action = action
self.prev_state = np.copy(state)
return action
def PlotResults(self):
plt.figure()
plt.plot(self.results['rewards'])
plt.savefig(self.directory + 'AgentTrialRewards.pdf')
plt.close()
with open(self.directory + 'Results.pkl', 'wb') as handle:
pickle.dump(self.results, handle, protocol=pickle.HIGHEST_PROTOCOL)
return
| 6,788 | 29.039823 | 134 | py |
CTDL | CTDL-master/Gym/Agents/CTDL/SOM.py |
from Gym.Agents.CTDL.SOMLayer import SOMLayer
class DeepSOM(object):
def __init__(self, directory, input_dim, map_size, learning_rate, sigma, sigma_const):
self.directory = directory
self.SOM_layer = SOMLayer(input_dim, map_size, learning_rate, sigma, sigma_const)
return
def Update(self, state, best_unit, reward_value):
self.SOM_layer.Update(state, best_unit, reward_value)
return
def GetOutput(self, state):
best_unit = self.SOM_layer.GetBestUnit(state)
return best_unit
| 553 | 22.083333 | 90 | py |
CTDL | CTDL-master/Gym/Agents/CTDL/QTargetGraph.py |
import tensorflow as tf
import numpy as np
class QTargetGraph(object):
def __init__(self, directory):
tf.reset_default_graph()
self.graph = tf.Graph()
with self.graph.as_default():
saver = tf.train.import_meta_graph(directory + ".meta")
self.logits = tf.get_collection('logits')
self.sess = tf.Session(graph=self.graph)
saver.restore(self.sess, directory)
def GetActionValues(self, X):
preds = self.sess.run(self.logits, feed_dict={'X:0': X})
return preds
| 563 | 15.588235 | 67 | py |
CTDL | CTDL-master/Gym/Agents/CTDL/QGraph.py |
import os
import tensorflow as tf
import numpy as np
class QGraph(object):
def __init__(self, input_dim, num_actions, directory):
self.ti = 0
self.num_actions = num_actions
self.directory = directory
self.input_dim = input_dim
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
tf.reset_default_graph()
self.graph = tf.Graph()
with self.graph.as_default():
""" Construction phase """
self.X = tf.placeholder(tf.float32, shape=(None, self.input_dim), name="X")
self.y = tf.placeholder(tf.float32, shape=(None), name="y")
self.actions = tf.placeholder(tf.float32, shape=[None, self.num_actions], name="actions")
# Layers
self.dense1 = tf.layers.dense(inputs=self.X, units=128, activation=tf.nn.relu)
self.dense2 = tf.layers.dense(inputs=self.dense1, units=128, activation=tf.nn.relu)
self.logits = tf.layers.dense(inputs=self.dense2, units=self.num_actions)
# Loss function
with tf.name_scope("loss"):
self.predictions = tf.reduce_sum(tf.multiply(self.logits, self.actions), 1)
self.targets = tf.stop_gradient(self.y)
self.error = self.targets - self.predictions
self.clipped_error = tf.clip_by_value(self.targets - self.predictions, -1., 1.)
self.loss = tf.reduce_mean(tf.multiply(self.error, self.clipped_error), axis=0, name='loss')
# Minimizer
self.learning_rate = 0.00025
self.momentum = 0.95
self.epsilon = 0.01
with tf.name_scope("train"):
self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate, momentum=self.momentum, epsilon=self.epsilon)
self.training_op = self.optimizer.minimize(self.loss)
self.init = tf.global_variables_initializer()
self.saver = tf.train.Saver()
tf.add_to_collection('logits', self.logits)
self.sess = tf.Session(graph=self.graph)
self.sess.run(self.init)
return
def GetActionValues(self, X):
preds = self.logits.eval(feed_dict={self.X: X}, session=self.sess)
return preds
def GradientDescentStep(self, X_batch, action_batch, y_batch):
# One hot encoded action tensor
actions = np.zeros((X_batch.shape[0], self.num_actions))
for i in range(X_batch.shape[0]):
actions[i, action_batch[i]] = 1
self.sess.run(self.training_op,
feed_dict={self.X: X_batch, self.y: y_batch, self.actions: actions})
return
def SaveGraphAndVariables(self):
save_path = self.saver.save(self.sess, self.directory)
print('Model saved in ' + save_path)
return
def LoadGraphAndVariables(self):
self.saver.restore(self.sess, self.directory)
print('Model loaded from ' + self.directory)
return
| 3,005 | 33.159091 | 138 | py |
CTDL | CTDL-master/Gym/Agents/CTDL/SOMLayer.py |
import numpy as np
class SOMLayer():
def __init__(self, input_dim, size, learning_rate, sigma, sigma_const):
self.size = size
self.num_units = size * size
self.num_weights = input_dim
self.learning_rate = learning_rate
self.sigma = sigma
self.sigma_const = sigma_const
self.units = {'xy': [], 'w': []}
self.ConstructMap()
return
def ConstructMap(self):
x = 0
y = 0
# Construct map
for u in range(self.num_units):
self.units['xy'].append([x, y])
            self.units['w'].append(np.random.rand(self.num_weights))  # alternative: np.random.randn(self.num_weights)
x += 1
if (x >= self.size):
x = 0
y += 1
self.units['xy'] = np.array(self.units['xy'])
self.units['w'] = np.array(self.units['w'])
return
def Update(self, state, best_unit, reward_value):
diffs = self.units['xy'] - self.units['xy'][best_unit, :]
location_distances = np.sqrt(np.sum(np.square(diffs), axis=-1))
neighbourhood_values = np.exp(-np.square(location_distances) / (2.0 * (self.sigma_const + (reward_value * self.sigma))))
self.units['w'] += (reward_value * self.learning_rate) * \
np.expand_dims(neighbourhood_values, axis=-1) * (state - self.units['w'])
return
def GetBestUnit(self, state):
best_unit = np.argmin(np.sum((self.units['w'] - state) ** 2, axis=-1), axis=0)
return best_unit
| 1,561 | 26.403509 | 128 | py |
CTDL | CTDL-master/Gym/Enums/Enums.py |
from enum import Enum
class EnvType(Enum):
CartPole = 0
MountainCarContinuous = 1
class AgentType(Enum):
DQN = 0
CTDL = 1
A2C = 2
CTDL_A2C = 3
| 170 | 12.153846 | 29 | py |
CTDL | CTDL-master/Gym/Functions/RunTask.py |
from Gym.Parameters import env_params, agent_params
from Gym.Functions.Run import Run
def RunTask():
for i in range(env_params['num_repeats']):
Run(env_params, agent_params)
return
| 200 | 19.1 | 51 | py |
CTDL | CTDL-master/Gym/Functions/Run.py |
import os
import gym
from datetime import datetime
from Utilities import RecordSettings
from Gym.Enums.Enums import EnvType, AgentType
def Run(env_params, agent_params):
results_dir = CreateResultsDirectory()
    # Set up environment
if (env_params['env'] == EnvType.CartPole):
env = gym.make('CartPole-v1')
elif (env_params['env'] == EnvType.MountainCarContinuous):
env = gym.make('MountainCarContinuous-v0')
env_params['env_obj'] = env
env_params['state_mins'] = env.observation_space.low
env_params['state_maxs'] = env.observation_space.high
env_params['state_dim'] = env.observation_space.shape[0]
if(isinstance(env.action_space, gym.spaces.Box)):
env_params['action_maxs'] = env.action_space.high
env_params['action_mins'] = env.action_space.low
else:
env_params['num_actions'] = env.action_space.n
    # Set up agent
if(agent_params['agent_type'] == AgentType.CTDL):
from Gym.Agents.CTDL.Agent import Agent
elif(agent_params['agent_type'] == AgentType.DQN):
from Gym.Agents.DQN.Agent import Agent
elif (agent_params['agent_type'] == AgentType.A2C):
from Gym.Agents.A2C.Agent import Agent
elif (agent_params['agent_type'] == AgentType.CTDL_A2C):
from Gym.Agents.CTDL_A2C.Agent import Agent
agent = Agent(results_dir, env_params, agent_params)
# Record settings
RecordSettings(results_dir, env_params, agent_params)
# Run
RunEnv(agent, env, env_params)
return
def RunEnv(agent, env, env_params):
trial = 0
reward = 0
bTrial_over = False
state = env.reset()
ti = 0
print('Starting Trial ' + str(trial) + '...')
while trial < env_params['num_trials']:
if (ti % 50 == 0):
print('Time Step: ' + str(ti) + ' Agent Epsilon: ' + str(agent.epsilon))
ti += 1
action = agent.Update(reward, state, bTrial_over)
state, reward, bTrial_over, info = env.step(action)
if(ti % env_params['max_steps'] == 0):
bTrial_over = True
if (bTrial_over):
trial += 1
ti = 0
state = env.reset()
print('Starting Trial ' + str(trial) + '...')
env.close()
agent.PlotResults()
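# Interface note: Agent.Update consumes the reward/state produced by the
# previous env.step and returns the next action, so learning runs one step
# behind the environment by construction.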
def CreateResultsDirectory():
date_time = str(datetime.now())
date_time = date_time.replace(" ", "_")
date_time = date_time.replace(".", "_")
date_time = date_time.replace("-", "_")
date_time = date_time.replace(":", "_")
# Make the results directory
dir_path = os.path.abspath(os.path.join(os.path.dirname(__file__)))
results_dir = dir_path + '/../Results/' + date_time + '/'
os.mkdir(results_dir)
return results_dir
| 2,734 | 27.489583 | 84 | py |
CTDL | CTDL-master/Gym/Functions/Plotters.py |
import numpy as np
import matplotlib.pyplot as plt
def PlotComparisons(data_frames, labels):
fig, axes = plt.subplots(1, 2, figsize=(6, 3))
axes[0].set_xlabel('Episode')
axes[0].set_ylabel('Episode Reward')
axes[1].set_xlabel('Episode')
axes[1].set_ylabel('Cumulative Episode Reward')
axes[1].spines['top'].set_visible(False)
axes[1].spines['right'].set_visible(False)
axes[0].spines['top'].set_visible(False)
axes[0].spines['right'].set_visible(False)
axes[0].ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
axes[1].ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
colors = ['b', 'r', 'g', 'k', 'c', 'm']
for df, label, color in zip(data_frames, labels, colors):
reward_results = []
cum_reward_results = []
for rewards, lengths in zip(df['rewards'], df['lengths']):
reward_results.append(rewards)
cum_reward_results.append(np.cumsum(rewards))
y = np.mean(reward_results, axis=0)
x = np.arange(y.shape[0])
error = np.std(reward_results, axis=0)
axes[0].plot(x, y, color=color, label=label)
axes[0].fill_between(x, y-error, y+error, color=color, alpha=.25)
y = np.mean(cum_reward_results, axis=0)
x = np.arange(y.shape[0])
error = np.std(cum_reward_results, axis=0)
axes[1].plot(x, y, color=color, label=label)
axes[1].fill_between(x, y - error, y + error, color=color, alpha=.25)
for s in axes.ravel():
s.legend(loc='lower left')
fig.tight_layout()
fig.savefig('Plots/ComparisonPlot.pdf')
plt.close(fig)
return
| 1,654 | 29.648148 | 77 | py |
CTDL | CTDL-master/Gym/Functions/Parsers.py |
import os
import pickle
import numpy as np
import pandas as pd
def ParseIntoDataframes(dir, to_compare):
folders = os.listdir('Results/' + dir)
data_frames = []
labels = []
sorted_folders = [[] for i in range(to_compare.__len__())]
for folder in folders:
if (folder == '.DS_Store' or folder == '.keep'):
pass
else:
files = os.listdir('Results/' + dir + '/' + folder)
if ('.DS_Store' in files):
files.remove('.DS_Store')
file = open('Results/' + dir + '/' + folder + '/Settings.txt', 'r')
settings = file.readlines()
file.close()
for setting in settings:
vals = setting.strip('\n').split(': ')
if (vals[0] == 'agent_type'):
try:
ind = np.where(np.array(to_compare) == vals[1].split('.')[1])[0][0]
sorted_folders[ind].append(folder)
                    except IndexError:
                        # agent_type absent from to_compare or malformed line; skip
                        pass
for model, folders in zip(to_compare, sorted_folders):
data_frames.append(ParseDataFrame(folders, dir))
labels.append(model)
return data_frames, labels
def ParseDataFrame(folders, dir):
results_dict = {'dir': [], 'rewards': [], 'lengths': []}
for folder in folders:
if(folder == '.DS_Store'):
pass
else:
results_dict['dir'].append(folder)
with open('Results/' + dir + '/' + folder + '/Results.pkl', 'rb') as handle:
dict = pickle.load(handle)
results_dict['rewards'].append(dict['rewards'])
results_dict['lengths'].append(dict['lengths'])
file = open('Results/' + dir + '/' + folder + '/Settings.txt', 'r')
settings = file.readlines()
file.close()
for setting in settings:
vals = setting.split(': ')
if(vals[0] not in results_dict):
results_dict[vals[0]] = []
try:
results_dict[vals[0]].append(float(vals[1]))
                except ValueError:
                    # value is not numeric; keep the raw string
                    results_dict[vals[0]].append(vals[1])
df = pd.DataFrame.from_dict(results_dict)
return df
| 2,256 | 28.697368 | 91 | py |
CTDL | CTDL-master/GridWorld/Parameters.py |
from GridWorld.Enums.Enums import MazeType, AgentType
maze_params = {'type': MazeType.random,
'width': 10,
'height': 10,
'num_rewards': 1,
'num_trials': 1000,
'random_seed': 0,
'max_steps': 1000,
'num_repeats': 30
}
agent_params = {'agent_type': AgentType.CTDL,
'bSOM': True,
'SOM_alpha': .01,
'SOM_sigma': .1,
'SOM_sigma_const': .1,
'Q_alpha': .9,
'w_decay': 10,
'TD_decay': 1,
'SOM_size': 6,
'e_trials': 200
}
| 694 | 27.958333 | 53 | py |
CTDL | CTDL-master/GridWorld/AnalyseMazeTypeSweep.py |
from GridWorld.Functions.Parsers import ParseIntoDataframes
from GridWorld.Functions.Plotters import PlotComparisons, PlotMeanSOMLocations
dir = 'MazeTypeSweep'
to_compare = ['CTDL', 'DQN']
data_frames, labels = ParseIntoDataframes(dir, to_compare)
PlotComparisons('type', data_frames, labels)
PlotMeanSOMLocations('Results/' + dir + '/', data_frames[0])
| 360 | 26.769231 | 78 | py |
CTDL | CTDL-master/GridWorld/AnalyseRevaluationSweep.py |
from GridWorld.Functions.Parsers import ParseIntoDataframes
from GridWorld.Functions.Plotters import PlotRevaluationComparisons, PlotMeanSOMLocations
dir = 'RevaluationSweep'
to_compare = ['CTDL', 'DQN']
data_frames, labels = ParseIntoDataframes(dir, to_compare)
PlotRevaluationComparisons(data_frames, labels)
PlotMeanSOMLocations('Results/' + dir + '/', data_frames[0])
| 377 | 28.076923 | 89 | py |
CTDL | CTDL-master/GridWorld/AnalyseRandomSeedSweep.py |
from GridWorld.Functions.Parsers import ParseIntoDataframes
from GridWorld.Functions.Plotters import PlotComparisons, PlotPairwiseComparison
dir = 'RandomSeedSweep'
to_compare = ['CTDL', 'DQN']
data_frames, labels = ParseIntoDataframes(dir, to_compare)
PlotComparisons('random_seed', data_frames, labels)
PlotPairwiseComparison(data_frames[0], data_frames[1], labels)
| 371 | 32.818182 | 80 | py |
CTDL | CTDL-master/GridWorld/Agents/DQN/Minibatch.py |
class MiniBatch(object):
def __init__(self):
self.prev_states = []
self.actions = []
self.rewards = []
self.states = []
self.bTrial_over = []
| 187 | 19.888889 | 29 | py |
CTDL | CTDL-master/GridWorld/Agents/DQN/Memory.py |
import numpy as np
from GridWorld.Agents.DQN.Minibatch import MiniBatch
class Memory(object):
def __init__(self):
self.capacity = 100000
self.prev_states = []
self.states = []
self.actions = []
self.rewards = []
self.bTrial_over = []
return
def RecordExperience(self, prev_state, state, action, reward, bTrial_over):
self.prev_states.append(prev_state)
self.states.append(state)
self.rewards.append(reward)
self.bTrial_over.append(bTrial_over)
self.actions.append(action)
if(self.rewards.__len__() > self.capacity):
del self.prev_states[0]
del self.states[0]
del self.actions[0]
del self.rewards[0]
del self.bTrial_over[0]
return
def GetMinibatch(self, minibatch_size):
minibatch = MiniBatch()
experience_indices = np.random.randint(0, self.rewards.__len__(), minibatch_size)
prev_states = []
actions = []
rewards = []
states = []
bTrial_over = []
for i in experience_indices:
prev_states.append(self.prev_states[i])
actions.append(self.actions[i])
rewards.append(self.rewards[i])
states.append(self.states[i])
bTrial_over.append(self.bTrial_over[i])
minibatch.prev_states = np.squeeze(np.array(prev_states, dtype=int))
minibatch.actions = np.array(actions, dtype=int)
minibatch.rewards = np.array(rewards, dtype=float)
minibatch.states = np.squeeze(np.array(states, dtype=int))
minibatch.bTrial_over = bTrial_over
return minibatch
| 1,706 | 26.095238 | 89 | py |
CTDL | CTDL-master/GridWorld/Agents/DQN/Agent.py |
import matplotlib.pyplot as plt
import numpy as np
import pickle
from GridWorld.Agents.DQN.Memory import Memory
from GridWorld.Agents.DQN.QTargetGraph import QTargetGraph
from GridWorld.Agents.DQN.QGraph import QGraph
class Agent(object):
def __init__(self, directory, maze_params, agent_params):
self.directory = directory
self.maze_width = maze_params['width']
self.maze_height = maze_params['height']
self.minibatch_size = 32
self.q_graph = QGraph(4, self.directory, self.maze_width)
self.q_graph.SaveGraphAndVariables()
self.q_target_graph = QTargetGraph(self.directory, self.maze_width)
self.memory = Memory()
self.discount_factor = 0.99
self.epsilon = 0
self.final_epsilon = .9
self.num_epsilon_trials = agent_params['e_trials']
self.epsilon_increment = self.final_epsilon / self.num_epsilon_trials
self.c = 10000
self.ci = 0
self.results = {'rewards': [], 'lengths': []}
self.trial_reward = 0
self.trial_length = 0
self.plot_num = 0
self.prev_state = None
self.prev_action = None
self.bStart_learning = False
return
def Update(self, reward, state, bTrial_over):
state = np.expand_dims(state, axis=0)
if (bTrial_over and self.epsilon < self.final_epsilon):
self.epsilon += self.epsilon_increment
self.RecordResults(bTrial_over, reward)
if(self.bStart_learning):
self.memory.RecordExperience(self.prev_state, state, self.prev_action, reward, bTrial_over)
self.UpdateQGraph()
action = self.SelectAction(state)
if (not self.bStart_learning):
self.bStart_learning = True
return action
def RecordResults(self, bTrial_over, reward):
self.trial_reward += reward
self.trial_length += 1
if (bTrial_over):
self.results['rewards'].append(self.trial_reward)
self.trial_reward = 0
self.results['lengths'].append(self.trial_length)
self.trial_length = 0
return
def NewMaze(self, directory):
self.directory = directory
self.q_graph.directory = directory
self.UpdateTargetGraph()
self.results = {'rewards': [], 'lengths': []}
self.trial_reward = 0
self.trial_length = 0
self.plot_num = 0
self.prev_state = None
self.prev_action = None
self.prev_Qvalue = None
self.bStart_learning = False
return
def UpdateQGraph(self):
self.ci += 1
if (self.ci >= self.c):
self.UpdateTargetGraph()
minibatch = self.memory.GetMinibatch(self.minibatch_size)
max_action_values = np.amax(np.squeeze(np.array(self.q_target_graph.GetActionValues(minibatch.states))), axis=1)
        targets = np.zeros(len(minibatch.rewards))
for i in range(targets.shape[0]):
if(minibatch.bTrial_over[i]):
targets[i] = minibatch.rewards[i]
else:
targets[i] = minibatch.rewards[i] + (max_action_values[i] * self.discount_factor)
self.q_graph.GradientDescentStep(minibatch.prev_states, minibatch.actions, targets)
return
def UpdateTargetGraph(self):
print('Loading New target Graph')
self.ci = 0
self.q_graph.SaveGraphAndVariables()
self.q_target_graph = QTargetGraph(self.directory, self.maze_width)
return
def SelectAction(self, state):
        # epsilon anneals upwards in this code base, so it acts as the
        # probability of choosing the greedy action rather than a random one
        if (np.random.rand() > self.epsilon):
            action = np.random.randint(4)
        else:
            action = np.argmax(np.squeeze(np.array(self.q_graph.GetActionValues(state))))
self.prev_action = action
self.prev_state = np.copy(state)
return action
def PlotResults(self):
plt.figure()
plt.plot(self.results['rewards'])
        found_goal = np.where(np.array(self.results['rewards']) > 0)
        # np.where returns a tuple, which is always truthy; test the indices
        if (found_goal[0].size > 0):
            for loc in found_goal[0]:
                plt.axvline(x=loc, color='g')
plt.savefig(self.directory + 'AgentTrialRewards.pdf')
plt.close()
with open(self.directory + 'Results.pkl', 'wb') as handle:
pickle.dump(self.results, handle, protocol=pickle.HIGHEST_PROTOCOL)
return
def PlotValueFunction(self):
up_value_function = np.zeros((self.maze_height, self.maze_width))
down_value_function = np.zeros((self.maze_height, self.maze_width))
left_value_function = np.zeros((self.maze_height, self.maze_width))
right_value_function = np.zeros((self.maze_height, self.maze_width))
for row in range(self.maze_height):
for col in range(self.maze_width):
action_values = np.squeeze(np.array(self.q_graph.GetActionValues(np.array([[row, col]]))))
up_value_function[row, col] = action_values[0]
down_value_function[row, col] = action_values[1]
left_value_function[row, col] = action_values[2]
right_value_function[row, col] = action_values[3]
fig, axes = plt.subplots(2, 2)
im = axes[0, 0].imshow(up_value_function, cmap='hot')
axes[0, 0].set_title('Up Value Function')
im = axes[0, 1].imshow(down_value_function, cmap='hot')
axes[0, 1].set_title('Down Value Function')
im = axes[1, 0].imshow(left_value_function, cmap='hot')
axes[1, 0].set_title('Left Value Function')
im = axes[1, 1].imshow(right_value_function, cmap='hot')
axes[1, 1].set_title('Right Value Function')
for axis in axes.ravel():
axis.set_xticklabels([])
axis.set_xticks([])
axis.set_yticklabels([])
axis.set_yticks([])
fig.colorbar(im, ax=axes.ravel().tolist())
plt.savefig(self.directory + 'ValueFunction%06d.pdf' % self.plot_num)
plt.close()
self.plot_num += 1
return
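# A minimal sketch of the TD target computed in UpdateQGraph above (hedged:
# the numbers in the comment are dummy values). For a transition (s, a, r, s')
# the update regresses Q(s, a) towards
#   r                                       if the trial ended
#   r + discount * max_a' Q_target(s', a')  otherwise
# e.g. with r = 1.0, discount = 0.99 and max_a' Q_target = 0.5 the
# non-terminal target is 1.0 + 0.99 * 0.5 = 1.495.
def _td_target_example(reward, max_next_q, bTrial_over, discount_factor=0.99):
    if bTrial_over:
        return reward
    return reward + (max_next_q * discount_factor)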
| 6,066 | 29.796954 | 120 |
py
|
CTDL
|
CTDL-master/GridWorld/Agents/DQN/QTargetGraph.py
|
import tensorflow as tf
import numpy as np
class QTargetGraph(object):
def __init__(self, directory, maze_size):
self.maze_size = maze_size
tf.reset_default_graph()
self.graph = tf.Graph()
with self.graph.as_default():
saver = tf.train.import_meta_graph(directory + ".meta")
self.logits = tf.get_collection('logits')
self.sess = tf.Session(graph=self.graph)
saver.restore(self.sess, directory)
def GetActionValues(self, X):
preds = self.sess.run(self.logits, feed_dict={'X:0': X / self.maze_size})
return preds
| 626 | 16.914286 | 81 |
py
|
CTDL
|
CTDL-master/GridWorld/Agents/DQN/QGraph.py
|
import os
import tensorflow as tf
import numpy as np
class QGraph(object):
def __init__(self, num_actions, directory, maze_size):
self.ti = 0
self.num_actions = num_actions
self.directory = directory
self.maze_size = maze_size
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
tf.reset_default_graph()
self.graph = tf.Graph()
with self.graph.as_default():
""" Construction phase """
self.X = tf.placeholder(tf.float32, shape=(None, 2), name="X")
self.y = tf.placeholder(tf.float32, shape=(None), name="y")
self.actions = tf.placeholder(tf.float32, shape=[None, self.num_actions], name="actions")
# Layers
self.dense1 = tf.layers.dense(inputs=self.X, units=128, activation=tf.nn.relu)
self.dense2 = tf.layers.dense(inputs=self.dense1, units=128, activation=tf.nn.relu)
self.logits = tf.layers.dense(inputs=self.dense2, units=self.num_actions)
# Loss function
with tf.name_scope("loss"):
self.predictions = tf.reduce_sum(tf.multiply(self.logits, self.actions), 1)
self.targets = tf.stop_gradient(self.y)
self.error = self.targets - self.predictions
self.clipped_error = tf.clip_by_value(self.targets - self.predictions, -1., 1.)
self.loss = tf.reduce_mean(tf.multiply(self.error, self.clipped_error), axis=0, name='loss')
# Minimizer
self.learning_rate = 0.00025
self.momentum = 0.95
self.epsilon = 0.01
self.batch_size = 32
with tf.name_scope("train"):
self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate, momentum=self.momentum, epsilon=self.epsilon)
self.training_op = self.optimizer.minimize(self.loss)
self.init = tf.global_variables_initializer()
self.saver = tf.train.Saver()
tf.add_to_collection('logits', self.logits)
self.sess = tf.Session(graph=self.graph)
self.sess.run(self.init)
return
def GetActionValues(self, X):
preds = self.logits.eval(feed_dict={self.X: X / self.maze_size}, session=self.sess)
return preds
def GradientDescentStep(self, X_batch, action_batch, y_batch):
# One hot encoded action tensor
actions = np.zeros((self.batch_size, self.num_actions))
for i in range(self.batch_size):
actions[i, action_batch[i]] = 1
self.sess.run(self.training_op,
feed_dict={self.X: X_batch / self.maze_size, self.y: y_batch, self.actions: actions})
return
def SaveGraphAndVariables(self):
save_path = self.saver.save(self.sess, self.directory)
print('Model saved in ' + save_path)
return
def LoadGraphAndVariables(self):
self.saver.restore(self.sess, self.directory)
print('Model loaded from ' + self.directory)
return
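# Note on the loss above: error * clip(error, -1, 1) behaves like a Huber
# loss, quadratic for |error| <= 1 and linear (|error|) outside, which bounds
# the gradient contribution of large TD errors. A quick numpy sketch of that
# claim (illustrative values only):
def _clipped_loss_example():
    errors = np.array([-3.0, -0.5, 0.5, 3.0])
    return errors * np.clip(errors, -1.0, 1.0)  # [3.0, 0.25, 0.25, 3.0]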
| 3,057 | 33.359551 | 138 |
py
|
CTDL
|
CTDL-master/GridWorld/Agents/CTDL/Agent.py
|
import matplotlib.pyplot as plt
import numpy as np
import pickle
from GridWorld.Agents.CTDL.QGraph import QGraph
from GridWorld.Agents.CTDL.SOM import SOM
from GridWorld.Agents.CTDL.QTargetGraph import QTargetGraph
class Agent(object):
def __init__(self, directory, maze_params, agent_params):
self.bSOM = agent_params['bSOM']
self.directory = directory
self.maze_width = maze_params['width']
self.maze_height = maze_params['height']
self.q_graph = QGraph(4, self.directory, self.maze_width)
self.q_graph.SaveGraphAndVariables()
self.q_target_graph = QTargetGraph(self.directory, self.maze_width)
if(self.bSOM):
self.CreateSOM(agent_params)
self.weighting_decay = agent_params['w_decay']
self.TD_decay = agent_params['TD_decay']
self.discount_factor = 0.99
self.epsilon = 0
self.final_epsilon = .9
self.num_epsilon_trials = agent_params['e_trials']
self.epsilon_increment = self.final_epsilon / self.num_epsilon_trials
self.c = 10000
self.ci = 0
self.results = {'rewards': [], 'lengths': []}
self.trial_reward = 0
self.trial_length = 0
self.plot_num = 0
self.prev_state = None
self.prev_action = None
self.prev_Qvalue = None
self.bStart_learning = False
return
def CreateSOM(self, agent_params):
self.SOM = SOM(self.directory, self.maze_width, self.maze_height, 2, agent_params['SOM_size'],
agent_params['SOM_alpha'], agent_params['SOM_sigma'],
agent_params['SOM_sigma_const'])
self.Q_alpha = agent_params['Q_alpha']
self.QValues = np.zeros((agent_params['SOM_size'] * agent_params['SOM_size'], 4))
return
def Update(self, reward, state, bTrial_over):
if (bTrial_over and self.epsilon < self.final_epsilon):
self.epsilon += self.epsilon_increment
self.RecordResults(bTrial_over, reward)
if(self.bStart_learning):
self.UpdateQGraph(reward, state, bTrial_over)
action = self.SelectAction(state)
if(not self.bStart_learning):
self.bStart_learning = True
return action
def RecordResults(self, bTrial_over, reward):
self.trial_reward += reward
self.trial_length += 1
if (bTrial_over):
self.results['rewards'].append(self.trial_reward)
self.trial_reward = 0
self.results['lengths'].append(self.trial_length)
self.trial_length = 0
return
def NewMaze(self, directory):
self.directory = directory
self.q_graph.directory = directory
self.SOM.directory = directory
self.UpdateTargetGraph()
self.results = {'rewards': [], 'lengths': []}
self.trial_reward = 0
self.trial_length = 0
self.plot_num = 0
self.prev_state = None
self.prev_action = None
self.prev_Qvalue = None
self.bStart_learning = False
self.SOM.location_counts = np.zeros((self.maze_height, self.maze_width))
return
def GetWeighting(self, best_unit, state):
diff = np.sum(np.square(self.SOM.SOM_layer.units['w'][best_unit, :] - state))
w = np.exp(-diff / self.weighting_decay)
return w
def GetQValues(self, state, q_graph_values):
best_unit = self.SOM.GetOutput(state)
som_action_values = self.QValues[best_unit, :]
w = self.GetWeighting(best_unit, state)
q_values = (w * som_action_values) + ((1 - w) * q_graph_values)
return q_values
def UpdateQGraph(self, reward, state, bTrial_over):
self.ci += 1
if (self.ci >= self.c):
self.UpdateTargetGraph()
target = self.GetTargetValue(bTrial_over, reward, state)
self.q_graph.GradientDescentStep(np.expand_dims(self.prev_state, axis=0),
np.expand_dims(self.prev_action, axis=0),
np.expand_dims(target, axis=0))
if(self.bSOM):
self.UpdateSOM(target)
return
def UpdateTargetGraph(self):
print('Loading New target Graph')
self.ci = 0
self.q_graph.SaveGraphAndVariables()
self.q_target_graph = QTargetGraph(self.directory, self.maze_width)
return
def UpdateSOM(self, target):
prev_best_unit = self.SOM.GetOutput(self.prev_state)
delta = np.exp(np.abs(target -
np.squeeze(self.q_graph.GetActionValues(
np.expand_dims(self.prev_state, axis=0)))[self.prev_action]) / self.TD_decay) - 1
delta = np.clip(delta, 0, 1)
self.SOM.Update(self.prev_state, prev_best_unit, delta)
prev_best_unit = self.SOM.GetOutput(self.prev_state)
w = self.GetWeighting(prev_best_unit, self.prev_state)
self.QValues[prev_best_unit, self.prev_action] += self.Q_alpha * w * (target - self.QValues[prev_best_unit, self.prev_action])
self.Replay()
self.SOM.RecordLocationCounts()
return
def GetTargetValue(self, bTrial_over, reward, state):
q_graph_values = np.squeeze(np.array(self.q_target_graph.GetActionValues(np.expand_dims(state, axis=0))))
if(self.bSOM):
q_values = self.GetQValues(state, q_graph_values)
else:
q_values = q_graph_values
max_q_value = np.amax(q_values)
if (bTrial_over):
target = reward
else:
target = reward + (max_q_value * self.discount_factor)
return target
def Replay(self):
units = np.random.randint(0, self.SOM.SOM_layer.num_units, 32)
actions = np.random.randint(0, 4, 32)
self.q_graph.GradientDescentStep(self.SOM.SOM_layer.units['w'][units, :], actions, self.QValues[units, actions])
return
def SelectAction(self, state):
q_graph_values = np.squeeze(np.array(self.q_graph.GetActionValues(np.expand_dims(state, axis=0))))
if(self.bSOM):
q_values = self.GetQValues(state, q_graph_values)
else:
q_values = q_graph_values
        # epsilon anneals upwards, so it is the probability of the greedy pick
        if (np.random.rand() > self.epsilon):
            action = np.random.randint(4)
        else:
            action = np.argmax(q_values)
self.prev_Qvalue = q_values[action]
self.prev_action = action
self.prev_state = np.copy(state)
return action
def PlotResults(self):
plt.figure()
plt.plot(self.results['rewards'])
        found_goal = np.where(np.array(self.results['rewards']) > 0)
        # np.where returns a tuple, which is always truthy; test the indices
        if (found_goal[0].size > 0):
            for loc in found_goal[0]:
                plt.axvline(x=loc, color='g')
plt.savefig(self.directory + 'AgentTrialRewards.pdf')
plt.close()
with open(self.directory + 'Results.pkl', 'wb') as handle:
pickle.dump(self.results, handle, protocol=pickle.HIGHEST_PROTOCOL)
if (self.bSOM):
np.save(self.directory + 'LocationCounts', self.SOM.location_counts)
return
def PlotValueFunction(self):
up_value_function = np.zeros((self.maze_height, self.maze_width))
down_value_function = np.zeros((self.maze_height, self.maze_width))
left_value_function = np.zeros((self.maze_height, self.maze_width))
right_value_function = np.zeros((self.maze_height, self.maze_width))
for row in range(self.maze_height):
for col in range(self.maze_width):
q_graph_values = np.squeeze(np.array(self.q_graph.GetActionValues(np.array([[row, col]]))))
if(self.bSOM):
vals = self.GetQValues([row, col], q_graph_values)
else:
vals = q_graph_values
up_value_function[row, col] = vals[0]
down_value_function[row, col] = vals[1]
left_value_function[row, col] = vals[2]
right_value_function[row, col] = vals[3]
fig, axes = plt.subplots(2, 2)
im = axes[0, 0].imshow(up_value_function, cmap='hot')
axes[0, 0].set_title('Up Value Function')
im = axes[0, 1].imshow(down_value_function, cmap='hot')
axes[0, 1].set_title('Down Value Function')
im = axes[1, 0].imshow(left_value_function, cmap='hot')
axes[1, 0].set_title('Left Value Function')
im = axes[1, 1].imshow(right_value_function, cmap='hot')
axes[1, 1].set_title('Right Value Function')
for axis in axes.ravel():
axis.set_xticklabels([])
axis.set_xticks([])
axis.set_yticklabels([])
axis.set_yticks([])
fig.colorbar(im, ax=axes.ravel().tolist())
plt.savefig(self.directory + 'ValueFunction%06d.pdf' % self.plot_num)
plt.close()
if(self.bSOM):
self.SOM.PlotResults(self.plot_num)
self.plot_num += 1
return
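# A minimal sketch of the SOM/DQN mixing rule in GetWeighting/GetQValues
# above (hedged: the state, unit weight and Q-values below are dummy values):
#   w = exp(-||w_best_unit - state||^2 / weighting_decay)
#   q = w * q_som + (1 - w) * q_dqn
# States close to an SOM unit trust the tabular estimate; distant states fall
# back to the network.
def _mixing_example(weighting_decay=1.0):
    state = np.array([3.0, 4.0])
    unit_w = np.array([3.0, 5.0])             # squared distance = 1.0
    w = np.exp(-np.sum(np.square(unit_w - state)) / weighting_decay)
    q_som, q_dqn = 1.0, 0.0
    return (w * q_som) + ((1 - w) * q_dqn)    # ~0.37 for this distance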
| 9,074 | 30.078767 | 134 |
py
|
CTDL
|
CTDL-master/GridWorld/Agents/CTDL/SOM.py
|
import numpy as np
import matplotlib.pyplot as plt
from GridWorld.Agents.CTDL.SOMLayer import SOMLayer
class SOM(object):
def __init__(self, directory, maze_width, maze_height, input_dim, map_size, learning_rate, sigma, sigma_const):
self.directory = directory
self.maze_width = maze_width
self.maze_height = maze_height
self.SOM_layer = SOMLayer(np.amax([maze_width, maze_height]), input_dim, map_size, learning_rate, sigma, sigma_const)
self.location_counts = np.zeros((maze_height, maze_width))
return
def Update(self, state, best_unit, reward_value):
self.SOM_layer.Update(state, best_unit, reward_value)
return
def GetOutput(self, state):
best_unit = self.SOM_layer.GetBestUnit(state)
return best_unit
def PlotResults(self, plot_num):
self.PlotMap(plot_num)
self.PlotLocations(plot_num)
return
def PlotMap(self, plot_num):
width = np.unique(self.SOM_layer.units['xy']).shape[0]
height = width
im_grid = np.zeros((width, height, 3))
for i in range(width * height):
image = np.zeros(3)
image[:2] = self.SOM_layer.units['w'][i, :]
image = np.clip(np.array(image) / np.amax([self.maze_width, self.maze_height]), 0, 1)
im_grid[self.SOM_layer.units['xy'][i, 0], self.SOM_layer.units['xy'][i, 1], :] = image
plt.figure()
plt.imshow(im_grid)
plt.savefig(self.directory + 'SOM%06d.pdf' % plot_num)
plt.close()
return
def PlotLocations(self, plot_num):
im_grid = np.zeros((self.maze_height, self.maze_width))
for i in range(self.SOM_layer.num_units):
y = int(np.rint(np.clip(self.SOM_layer.units['w'][i, 0], 0, self.maze_height-1)))
x = int(np.rint(np.clip(self.SOM_layer.units['w'][i, 1], 0, self.maze_width-1)))
im_grid[y, x] = 1
plt.figure()
plt.imshow(im_grid)
plt.savefig(self.directory + 'SOMLocations%06d.pdf' % plot_num)
plt.close()
np.save(self.directory + 'SOMLocations', im_grid)
return
def RecordLocationCounts(self):
for i in range(self.SOM_layer.num_units):
y = int(np.clip(self.SOM_layer.units['w'][i, 0], 0, self.maze_height-1))
x = int(np.clip(self.SOM_layer.units['w'][i, 1], 0, self.maze_width-1))
self.location_counts[y, x] += 1
return
| 2,484 | 28.939759 | 125 |
py
|
CTDL
|
CTDL-master/GridWorld/Agents/CTDL/QTargetGraph.py
|
import tensorflow as tf
class QTargetGraph(object):
def __init__(self, directory, maze_size):
self.maze_size = maze_size
tf.reset_default_graph()
self.graph = tf.Graph()
with self.graph.as_default():
saver = tf.train.import_meta_graph(directory + ".meta")
self.logits = tf.get_collection('logits')
self.sess = tf.Session(graph=self.graph)
saver.restore(self.sess, directory)
def GetActionValues(self, X):
preds = self.sess.run(self.logits, feed_dict={'X:0': X / self.maze_size})
return preds
| 607 | 16.882353 | 81 |
py
|
CTDL
|
CTDL-master/GridWorld/Agents/CTDL/QGraph.py
|
import os
import tensorflow as tf
import numpy as np
class QGraph(object):
def __init__(self, num_actions, directory, maze_size):
self.ti = 0
self.num_actions = num_actions
self.directory = directory
self.maze_size = maze_size
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
tf.reset_default_graph()
self.graph = tf.Graph()
with self.graph.as_default():
""" Construction phase """
self.X = tf.placeholder(tf.float32, shape=(None, 2), name="X")
self.y = tf.placeholder(tf.float32, shape=(None), name="y")
self.actions = tf.placeholder(tf.float32, shape=[None, self.num_actions], name="actions")
# Layers
self.dense1 = tf.layers.dense(inputs=self.X, units=128, activation=tf.nn.relu)
self.dense2 = tf.layers.dense(inputs=self.dense1, units=128, activation=tf.nn.relu)
self.logits = tf.layers.dense(inputs=self.dense2, units=self.num_actions)
# Loss function
with tf.name_scope("loss"):
self.predictions = tf.reduce_sum(tf.multiply(self.logits, self.actions), 1)
self.targets = tf.stop_gradient(self.y)
self.error = self.targets - self.predictions
self.clipped_error = tf.clip_by_value(self.targets - self.predictions, -1., 1.)
self.loss = tf.reduce_mean(tf.multiply(self.error, self.clipped_error), axis=0, name='loss')
# Minimizer
self.learning_rate = 0.00025
self.momentum = 0.95
self.epsilon = 0.01
with tf.name_scope("train"):
self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate, momentum=self.momentum, epsilon=self.epsilon)
self.training_op = self.optimizer.minimize(self.loss)
self.init = tf.global_variables_initializer()
self.saver = tf.train.Saver()
tf.add_to_collection('logits', self.logits)
self.sess = tf.Session(graph=self.graph)
self.sess.run(self.init)
return
def GetActionValues(self, X):
preds = self.logits.eval(feed_dict={self.X: X / self.maze_size}, session=self.sess)
return preds
def GradientDescentStep(self, X_batch, action_batch, y_batch):
actions = np.zeros((X_batch.shape[0], self.num_actions))
for i in range(X_batch.shape[0]):
actions[i, action_batch[i]] = 1
self.sess.run(self.training_op,
feed_dict={self.X: X_batch / self.maze_size, self.y: y_batch, self.actions: actions})
return
def SaveGraphAndVariables(self):
save_path = self.saver.save(self.sess, self.directory)
print('Model saved in ' + save_path)
return
def LoadGraphAndVariables(self):
self.saver.restore(self.sess, self.directory)
print('Model loaded from ' + self.directory)
return
| 2,986 | 33.333333 | 138 |
py
|
CTDL
|
CTDL-master/GridWorld/Agents/CTDL/SOMLayer.py
|
import numpy as np
class SOMLayer():
def __init__(self, maze_dim, input_dim, size, learning_rate, sigma, sigma_const):
self.size = size
self.num_units = size * size
self.num_dims = input_dim
self.num_weights = input_dim
self.learning_rate = learning_rate
self.sigma = sigma
self.sigma_const = sigma_const
self.units = {'xy': [], 'w': []}
self.ConstructMap(maze_dim)
return
def ConstructMap(self, maze_dim):
x = 0
y = 0
# Construct map
for u in range(self.num_units):
self.units['xy'].append([x, y])
self.units['w'].append(np.random.rand(self.num_weights) * maze_dim)
x += 1
if (x >= self.size):
x = 0
y += 1
self.units['xy'] = np.array(self.units['xy'])
self.units['w'] = np.array(self.units['w'])
return
def Update(self, state, best_unit, reward_value):
diffs = self.units['xy'] - self.units['xy'][best_unit, :]
location_distances = np.sqrt(np.sum(np.square(diffs), axis=-1))
neighbourhood_values = np.exp(-np.square(location_distances) / (
2.0 * (self.sigma_const + (reward_value * self.sigma))))
self.units['w'] += (reward_value * self.learning_rate) * \
np.expand_dims(neighbourhood_values, axis=-1) * (state - self.units['w'])
return
def GetBestUnit(self, state):
best_unit = np.argmin(np.sum((self.units['w'] - state) ** 2, axis=-1), axis=0)
return best_unit
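# A minimal usage sketch of the layer above (hedged: the constructor
# arguments and the state are dummy values): present a state, find the
# winning unit, then move all units towards the state scaled by the Gaussian
# neighbourhood around the winner and by the reward/TD-derived value.
def _som_layer_example():
    layer = SOMLayer(maze_dim=10, input_dim=2, size=4,
                     learning_rate=0.1, sigma=1.0, sigma_const=0.1)
    state = np.array([5.0, 5.0])
    winner = layer.GetBestUnit(state)
    layer.Update(state, winner, reward_value=0.5)
    return layer.units['w'][winner]           # nudged towards (5, 5)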
| 1,615 | 26.862069 | 100 |
py
|
CTDL
|
CTDL-master/GridWorld/Enums/Enums.py
|
from enum import Enum
class MazeType(Enum):
random = 1
direct = 2
obstacle1 = 3
obstacle2 = 4
class AgentType(Enum):
CTDL = 1
DQN = 2
| 160 | 12.416667 | 22 |
py
|
CTDL
|
CTDL-master/GridWorld/Classes/Maze.py
|
import numpy as np
import matplotlib.pyplot as plt
from GridWorld.Enums.Enums import MazeType
class Maze(object):
def __init__(self, directory, maze_params):
np.random.seed(maze_params['random_seed'])
self.type = maze_params['type']
self.width = maze_params['width']
self.height = maze_params['height']
self.num_hazards = maze_params['num_hazards']
self.num_rewards = maze_params['num_rewards']
self.max_steps = maze_params['max_steps']
self.directory = directory
self.ConstructMaze()
self.Reset()
self.step = 0
return
def ConstructMaze(self):
self.maze = np.zeros((self.height * self.width))
if(self.type == MazeType.random):
self.ConstructRandomMaze()
elif(self.type == MazeType.direct):
self.ConstructDirectMaze()
elif (self.type == MazeType.obstacle1):
self.ConstructFirstObstacleMaze()
elif (self.type == MazeType.obstacle2):
self.ConstructSecondObstacleMaze()
plt.figure()
plt.imshow(self.maze)
plt.savefig(self.directory + 'Maze.pdf')
plt.close()
np.save(self.directory + 'Maze', self.maze)
self.start = np.squeeze(np.array(np.where(self.maze == 2)))
self.maze[self.start[0], self.start[1]] = 0
return
def ConstructRandomMaze(self):
inds = np.random.choice(np.arange(self.height * self.width), self.num_hazards + self.num_rewards + 1,
replace=False)
self.maze[inds[:self.num_hazards]] = -1
self.maze[inds[self.num_hazards:self.num_hazards + self.num_rewards]] = 1
self.maze[inds[-1]] = 2
self.maze = self.maze.reshape((self.height, self.width))
return
def ConstructDirectMaze(self):
self.maze = self.maze.reshape((self.height, self.width))
self.maze[0, int(self.width / 2)] = 1
self.maze[-1, int(self.width / 2)] = 2
self.maze[:, :int(self.width / 2) - 2] = -1
self.maze[:, int(self.width / 2) + 3:] = -1
return
def ConstructFirstObstacleMaze(self):
self.ConstructDirectMaze()
self.maze[int(self.height / 2) - 1, int(self.width / 2) - 1: int(self.width / 2) + 2] = -1
return
def ConstructSecondObstacleMaze(self):
self.ConstructDirectMaze()
self.maze[int(self.height / 3), int(self.width / 2) - 2:int(self.width / 2) + 1] = -1
self.maze[int(self.height / 3) * 2, int(self.width / 2):int(self.width / 2) + 3] = -1
return
def GetMaze(self):
maze = np.copy(self.maze)
maze[self.start[0], self.start[1]] = 2
return maze
def Reset(self):
self.working_maze = np.copy(self.maze)
self.state = np.copy(self.start)
return
def Update(self, action):
self.step += 1
bTrial_over = False
self.reward = 0
self.UpdateState(action)
if(self.reward > 0 or self.step >= self.max_steps):
bTrial_over = True
self.step = 0
self.Reset()
return self.reward, self.state, bTrial_over
def UpdateState(self, action):
if (action == 0):
if (self.state[0] > 0):
self.state[0] -= 1
elif (action == 1):
if (self.state[0] < self.height - 1):
self.state[0] += 1
elif (action == 2):
if (self.state[1] > 0):
self.state[1] -= 1
elif (action == 3):
if (self.state[1] < self.width - 1):
self.state[1] += 1
self.reward = self.working_maze[self.state[0], self.state[1]]
if (self.reward > 0):
self.working_maze[self.state[0], self.state[1]] = 0
return
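# The action encoding used by UpdateState above: 0 = up (row - 1),
# 1 = down (row + 1), 2 = left (col - 1), 3 = right (col + 1), clamped at
# the maze borders. A minimal sketch (hedged: illustrative call only):
def _maze_step_example(maze):
    reward, state, bTrial_over = maze.Update(1)   # attempt one step down
    return reward, state, bTrial_over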
| 3,846 | 25.902098 | 109 |
py
|
CTDL
|
CTDL-master/GridWorld/Functions/MazeTypeSweep.py
|
from GridWorld.Parameters import maze_params, agent_params
from GridWorld.Functions.Run import Run
from GridWorld.Classes.Maze import MazeType
def RunMazeTypeSweep():
maze_types = [MazeType.direct, MazeType.obstacle1, MazeType.obstacle2]
for i in range(maze_params['num_repeats']):
for maze_type in maze_types:
maze_params['type'] = maze_type
Run(maze_params, agent_params)
return
| 429 | 27.666667 | 74 |
py
|
CTDL
|
CTDL-master/GridWorld/Functions/RevaluationSweep.py
|
from GridWorld.Parameters import maze_params, agent_params
from GridWorld.Functions.Run import RunSequentially, AgentType
from GridWorld.Classes.Maze import MazeType
def RunRevaluationSweep():
maze_types = [MazeType.direct, MazeType.obstacle1]
for i in range(maze_params['num_repeats']):
RunSequentially(maze_params, agent_params, maze_types)
return
| 374 | 27.846154 | 62 |
py
|
CTDL
|
CTDL-master/GridWorld/Functions/Run.py
|
import os
from datetime import datetime
from Utilities import RecordSettings
from GridWorld.Classes.Maze import Maze
from GridWorld.Enums.Enums import AgentType
def Run(maze_params, agent_params):
results_dir = CreateResultsDirectory()
maze_params['num_hazards'] = int((maze_params['width'] * maze_params['height']) / 5)
RecordSettings(results_dir, maze_params, agent_params)
if(agent_params['agent_type'] == AgentType.CTDL):
from GridWorld.Agents.CTDL.Agent import Agent
elif(agent_params['agent_type'] == AgentType.DQN):
from GridWorld.Agents.DQN.Agent import Agent
agent = Agent(results_dir, maze_params, agent_params)
maze = Maze(results_dir, maze_params)
RunMaze(agent, maze, maze_params)
return
def RunSequentially(maze_params, agent_params, mazes):
maze_params['num_hazards'] = int((maze_params['width'] * maze_params['height']) / 5)
    agent_params['e_trials'] = 200  # int(maze_params['num_trials'] / 5)
if (agent_params['agent_type'] == AgentType.CTDL):
from GridWorld.Agents.CTDL.Agent import Agent
elif (agent_params['agent_type'] == AgentType.DQN):
from GridWorld.Agents.DQN.Agent import Agent
for i, m in enumerate(mazes):
maze_params['type'] = m
results_dir = CreateResultsDirectory()
if(i == 0):
agent = Agent(results_dir, maze_params, agent_params)
else:
agent.NewMaze(results_dir)
RecordSettings(results_dir, maze_params, agent_params)
maze = Maze(results_dir, maze_params)
RunMaze(agent, maze, maze_params)
return
def RunMaze(agent, maze, maze_params):
trial = 0
reward = 0
state = maze.start
bTrial_over = False
ti = 0
print('Starting Trial ' + str(trial) + '...')
while trial < maze_params['num_trials']:
if (ti % 50 == 0):
print('Time Step: ' + str(ti) + ' Agent Epsilon: ' + str(agent.epsilon))
ti += 1
action = agent.Update(reward, state, bTrial_over)
reward, state, bTrial_over = maze.Update(action)
if (bTrial_over):
trial += 1
ti = 0
print('Starting Trial ' + str(trial) + '...')
if (trial % 10 == 0):
agent.PlotValueFunction()
agent.PlotResults()
return
def CreateResultsDirectory():
date_time = str(datetime.now())
date_time = date_time.replace(" ", "_")
date_time = date_time.replace(".", "_")
date_time = date_time.replace("-", "_")
date_time = date_time.replace(":", "_")
# Make the results directory
dir_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
results_dir = dir_path + '/Results/' + date_time + '/'
os.mkdir(results_dir)
return results_dir
| 2,789 | 28.0625 | 88 |
py
|
CTDL
|
CTDL-master/GridWorld/Functions/Plotters.py
|
import numpy as np
import matplotlib.pyplot as plt
def PlotComparisons(var, data_frames, labels):
vals = np.array([])
for df in data_frames:
vals = np.concatenate([vals, df[var].values])
vals = np.unique(vals)
num_plots = vals.shape[0]
figs = []
axes = []
for i in range(num_plots):
f, a = plt.subplots(4, figsize=(3, 6))
a[0].axis('off')
a[3].set_xlabel('Episode')
a[1].set_ylabel('Episode Length')
a[2].set_ylabel('Reward')
a[3].set_ylabel('Ideal Episodes')
a[1].set_xticks([])
a[2].set_xticks([])
a[3].spines['top'].set_visible(False)
a[3].spines['right'].set_visible(False)
a[1].spines['top'].set_visible(False)
a[1].spines['right'].set_visible(False)
a[1].spines['bottom'].set_visible(False)
a[2].spines['top'].set_visible(False)
a[2].spines['right'].set_visible(False)
a[2].spines['bottom'].set_visible(False)
a[1].ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
a[2].ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
figs.append(f)
axes.append(a)
colors = ['b', 'r', 'g', 'k', 'c', 'm']
for df, label, color in zip(data_frames, labels, colors):
length_results = [[] for i in range(num_plots)]
reward_results = [[] for i in range(num_plots)]
ideal_results = [[] for i in range(num_plots)]
for v, rewards, lengths, maze in zip(df[var], df['rewards'], df['lengths'], df['maze']):
p = np.where(vals == v)[0][0]
axes[p][0].set_title(var + ': ' + str(v))
axes[p][0].imshow(maze)
length_results[p].append(np.cumsum(lengths))
reward_results[p].append(np.cumsum(rewards))
ideal_results[p].append(np.cumsum(np.array(rewards) == 1))
for p in range(num_plots):
if(length_results[p]):
y = np.mean(length_results[p], axis=0)
x = np.arange(y.shape[0])
error = np.std(length_results[p], axis=0)
axes[p][1].plot(x, y, color=color)
axes[p][1].fill_between(x, y-error, y+error, color=color, alpha=.25)
y = np.mean(reward_results[p], axis=0)
x = np.arange(y.shape[0])
error = np.std(reward_results[p], axis=0)
axes[p][2].plot(x, y, color=color)
axes[p][2].fill_between(x, y - error, y + error, color=color, alpha=.25)
y = np.mean(ideal_results[p], axis=0)
x = np.arange(y.shape[0])
error = np.std(ideal_results[p], axis=0)
axes[p][3].plot(x, y, label=label, color=color)
axes[p][3].fill_between(x, y - error, y + error, color=color, alpha=.25)
for a in axes:
for s in a.ravel():
s.legend()
for i, f in enumerate(figs):
f.tight_layout()
f.savefig('Plots/ComparisonPlot' + str(i) + '.pdf')
plt.close(f)
return
def PlotPairwiseComparison(df1, df2, labels):
vals = np.array([])
vals = np.concatenate([vals, df1['random_seed'].values])
vals = np.concatenate([vals, df2['random_seed'].values])
vals = np.unique(vals)
num_points = vals.shape[0]
reward_results = [[] for i in range(num_points)]
ideal_results = [[] for i in range(num_points)]
for seed, rewards, lengths in zip(df1['random_seed'], df1['rewards'], df1['lengths']):
p = np.where(vals == seed)[0][0]
reward_results[p].append(np.sum(rewards))
ideal_results[p].append(np.sum(np.array(rewards) == 1))
ys = np.zeros((2, num_points))
for p in range(num_points):
ys[0, p] = np.mean(reward_results[p])
ys[1, p] = np.mean(ideal_results[p])
reward_results = [[] for i in range(num_points)]
ideal_results = [[] for i in range(num_points)]
for seed, rewards, lengths in zip(df2['random_seed'], df2['rewards'], df2['lengths']):
p = np.where(vals == seed)[0][0]
reward_results[p].append(np.sum(rewards))
ideal_results[p].append(np.sum(np.array(rewards) == 1))
xs = np.zeros((2, num_points))
for p in range(num_points):
xs[0, p] = np.mean(reward_results[p])
xs[1, p] = np.mean(ideal_results[p])
colors = ['r', 'b']
fig, axes = plt.subplots(1, 2, figsize=(6, 3))
axes[0].ticklabel_format(style='sci', axis='both', scilimits=(0, 0))
axes[0].scatter(xs[0, :], ys[0, :], color=[colors[i] for i in ys[0, :] > xs[0, :]])
axes[1].scatter(xs[1, :], ys[1, :], color=[colors[i] for i in ys[1, :] > xs[1, :]])
min_val = np.min(np.concatenate([xs[0, :], ys[0, :]]))
max_val = np.max(np.concatenate([xs[0, :], ys[0, :]]))
axes[0].plot([min_val, max_val], [min_val, max_val], 'k-')
axes[0].axis('equal')
axes[0].set_aspect('equal', 'box')
min_val = np.min(np.concatenate([xs[1, :], ys[1, :]]))
max_val = np.max(np.concatenate([xs[1, :], ys[1, :]]))
axes[1].plot([min_val, max_val], [min_val, max_val], 'k-')
axes[1].axis('equal')
axes[1].set_aspect('equal', 'box')
axes[0].set_ylabel(labels[0])
axes[0].set_xlabel(labels[1])
axes[1].set_xlabel(labels[1])
axes[0].set_title('Reward')
axes[1].set_title('Ideal Episodes')
axes[0].spines['top'].set_visible(False)
axes[0].spines['right'].set_visible(False)
axes[1].spines['top'].set_visible(False)
axes[1].spines['right'].set_visible(False)
fig.tight_layout()
plt.savefig('Plots/PairwiseComparisonPlot.pdf')
plt.close()
fig, axes = plt.subplots(1, 2, figsize=(4, 2))
axes[0].pie([np.sum(ys[0, :] > xs[0, :]), np.sum(ys[0, :] < xs[0, :])], colors=reversed(colors))
axes[1].pie([np.sum(ys[1, :] > xs[1, :]), np.sum(ys[1, :] < xs[1, :])], colors=reversed(colors))
fig.tight_layout()
plt.savefig('Plots/PairwisePieChart.pdf')
plt.close()
return
def PlotMeanSOMLocations(root_dir, df):
vals = df['type'].values
vals = np.unique(vals)
num_plots = vals.shape[0]
mazes = [[] for i in range(num_plots)]
for type, directory in zip(df['type'], df['dir']):
som_locations = np.load(root_dir + directory + '/SOMLocations.npy')
p = np.where(vals == type)[0][0]
mazes[p].append(som_locations)
for i in range(num_plots):
plt.figure()
        plt.imshow(np.mean(mazes[i], axis=0))  # optionally: cmap='plasma'
plt.axis('off')
plt.tight_layout()
plt.savefig('Plots/MeanSOMLocations' + str(i) + '.pdf')
plt.close()
return
def PlotRevaluationComparisons(data_frames, labels):
start = 0
end = 1000
var = 'type'
vals = np.array([])
for df in data_frames:
vals = np.concatenate([vals, df[var].values])
vals = np.unique(vals)
num_mazes = vals.shape[0]
f, a = plt.subplots(3, figsize=(6, 6))
a[2].set_xlabel('Episode')
a[0].set_ylabel('Episode Length')
a[1].set_ylabel('Reward')
a[2].set_ylabel('Ideal Episodes')
a[0].set_xticks([])
a[1].set_xticks([])
a[2].spines['top'].set_visible(False)
a[2].spines['right'].set_visible(False)
a[0].spines['top'].set_visible(False)
a[0].spines['right'].set_visible(False)
a[0].spines['bottom'].set_visible(False)
a[1].spines['top'].set_visible(False)
a[1].spines['right'].set_visible(False)
a[1].spines['bottom'].set_visible(False)
a[0].ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
a[1].ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
colors = ['b', 'r', 'g', 'k', 'c', 'm']
for df, label, color in zip(data_frames, labels, colors):
length_results = [[] for i in range(num_mazes)]
reward_results = [[] for i in range(num_mazes)]
ideal_results = [[] for i in range(num_mazes)]
for v, rewards, lengths, maze in zip(df[var], df['rewards'], df['lengths'], df['maze']):
p = np.where(vals == v)[0][0]
length_results[p].append(np.cumsum(lengths))
reward_results[p].append(np.cumsum(rewards))
ideal_results[p].append(np.cumsum(np.array(rewards) == 1))
for p in range(num_mazes):
num_trials = df['num_trials'][0]
if(p != 0):
y = np.array(length_results[p]) + np.expand_dims(np.array(length_results[p - 1])[:, -1], axis=-1)
else:
y = length_results[p]
error = np.std(y, axis=0)
y = np.mean(y, axis=0)
x = np.arange(y.shape[0]) + (p * num_trials)
a[0].plot(x, y, color=color)
a[0].fill_between(x, y - error, y + error, color=color, alpha=.25)
if (p != 0):
y = np.array(reward_results[p]) + np.expand_dims(np.array(reward_results[p - 1])[:, -1], axis=-1)
else:
y = reward_results[p]
error = np.std(y, axis=0)
y = np.mean(y, axis=0)
x = np.arange(y.shape[0]) + (p * num_trials)
a[1].plot(x, y, color=color)
a[1].fill_between(x, y - error, y + error, color=color, alpha=.25)
if (p != 0):
y = np.array(ideal_results[p]) + np.expand_dims(np.array(ideal_results[p-1])[:, -1], axis=-1)
else:
y = ideal_results[p]
error = np.std(y, axis=0)
y = np.mean(y, axis=0)
x = np.arange(y.shape[0]) + (p * num_trials)
if(p==0):
a[2].plot(x, y, label=label, color=color)
else:
a[2].plot(x, y, color=color)
a[0].axvline(p * num_trials, color='k', linestyle='--', linewidth=2)
a[1].axvline(p * num_trials, color='k', linestyle='--', linewidth=2)
a[2].axvline(p * num_trials, color='k', linestyle='--', linewidth=2)
a[2].fill_between(x, y - error, y + error, color=color, alpha=.25)
for axis in a:
axis.set_xlim([start, end])
for s in a.ravel():
s.legend()
f.tight_layout()
f.savefig('Plots/RevaluationComparisonPlot.pdf')
plt.close(f)
return
| 10,187 | 31.037736 | 113 |
py
|
CTDL
|
CTDL-master/GridWorld/Functions/RandomSeedSweep.py
|
import numpy as np
from GridWorld.Parameters import maze_params, agent_params
from GridWorld.Functions.Run import Run
def RunRandomSeedSweep():
random_seeds = np.arange(0, 50).tolist()
for i in range(maze_params['num_repeats']):
for random_seed in random_seeds:
maze_params['random_seed'] = random_seed
Run(maze_params, agent_params)
return
| 390 | 23.4375 | 58 |
py
|
CTDL
|
CTDL-master/GridWorld/Functions/Parsers.py
|
import os
import pickle
import numpy as np
import pandas as pd
def ParseIntoDataframes(dir, to_compare):
folders = os.listdir('Results/' + dir)
data_frames = []
labels = []
    sorted_folders = [[] for i in range(len(to_compare))]
for folder in folders:
if (folder == '.DS_Store' or folder == '.keep'):
pass
else:
files = os.listdir('Results/' + dir + '/' + folder)
if ('.DS_Store' in files):
files.remove('.DS_Store')
file = open('Results/' + dir + '/' + folder + '/Settings.txt', 'r')
settings = file.readlines()
file.close()
for setting in settings:
vals = setting.strip('\n').split(': ')
if (vals[0] == 'agent_type'):
try:
ind = np.where(np.array(to_compare) == vals[1].split('.')[1])[0][0]
sorted_folders[ind].append(folder)
                    except IndexError:
                        # no match in to_compare (or malformed agent_type)
                        pass
for model, folders in zip(to_compare, sorted_folders):
data_frames.append(ParseDataFrame(folders, dir))
labels.append(model)
return data_frames, labels
def ParseDataFrame(folders, dir):
results_dict = {'dir': [], 'rewards': [], 'lengths': [], 'maze': []}
for folder in folders:
try:
with open('Results/' + dir + '/' + folder + '/Results.pkl', 'rb') as handle:
dict = pickle.load(handle)
results_dict['dir'].append(folder)
results_dict['rewards'].append(dict['rewards'])
results_dict['lengths'].append(dict['lengths'])
file = open('Results/' + dir + '/' + folder + '/Settings.txt', 'r')
settings = file.readlines()
file.close()
for setting in settings:
vals = setting.split(': ')
if (vals[0] not in results_dict):
results_dict[vals[0]] = []
try:
results_dict[vals[0]].append(float(vals[1]))
                except ValueError:
results_dict[vals[0]].append(vals[1])
results_dict['maze'].append(np.load('Results/' + dir + '/' + folder + '/Maze.npy'))
        except Exception:
            # skip runs with missing or unreadable results
            pass
df = pd.DataFrame.from_dict(results_dict)
return df
| 2,346 | 29.089744 | 95 |
py
|
doc2vec
|
doc2vec-master/infer_test.py
|
#python example to infer document vectors from trained doc2vec model
import gensim.models as g
import codecs
#parameters
model="toy_data/model.bin"
test_docs="toy_data/test_docs.txt"
output_file="toy_data/test_vectors.txt"
#inference hyper-parameters
start_alpha=0.01
infer_epoch=1000
#load model
m = g.Doc2Vec.load(model)
with codecs.open(test_docs, "r", "utf-8") as f:
    test_docs = [x.strip().split() for x in f.readlines()]
#infer test vectors
with open(output_file, "w") as output:
    for d in test_docs:
        output.write(" ".join([str(x) for x in m.infer_vector(d, alpha=start_alpha, steps=infer_epoch)]) + "\n")
| 632 | 25.375 | 110 |
py
|
doc2vec
|
doc2vec-master/train_model.py
|
#python example to train doc2vec model (with or without pre-trained word embeddings)
import gensim.models as g
import logging
#doc2vec parameters
vector_size = 300
window_size = 15
min_count = 1
sampling_threshold = 1e-5
negative_size = 5
train_epoch = 100
dm = 0 #0 = dbow; 1 = dmpv
worker_count = 1 #number of parallel processes
#pretrained word embeddings
pretrained_emb = "toy_data/pretrained_word_embeddings.txt" #None if use without pretrained embeddings
#input corpus
train_corpus = "toy_data/train_docs.txt"
#output model
saved_path = "toy_data/model.bin"
#enable logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
#train doc2vec model
docs = g.doc2vec.TaggedLineDocument(train_corpus)
model = g.Doc2Vec(docs, size=vector_size, window=window_size, min_count=min_count, sample=sampling_threshold, workers=worker_count, hs=0, dm=dm, negative=negative_size, dbow_words=1, dm_concat=1, pretrained_emb=pretrained_emb, iter=train_epoch)
#save model
model.save(saved_path)
| 1,031 | 29.352941 | 244 |
py
|
rancher
|
rancher-master/tests/integration/setup.py
|
from distutils.core import setup
setup(
name='IntegrationTests',
version='0.1',
packages=[
'suite',
],
license='ASL 2.0',
long_description=open('README.txt').read(),
)
| 201 | 15.833333 | 47 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_node.py
|
import os
import tempfile
import pytest
from rancher import ApiError
from kubernetes.client import CoreV1Api
from .common import auth_check, random_str, string_to_encoding
from .conftest import wait_for, wait_for_condition
import time
def test_node_fields(admin_mc):
cclient = admin_mc.client
fields = {
'annotations': 'cru',
'appliedNodeVersion': 'r',
'labels': 'cru',
'nodeTaints': 'r',
'namespaceId': 'cr',
'conditions': 'r',
'allocatable': 'r',
'capacity': 'r',
'hostname': 'r',
'info': 'r',
'ipAddress': 'r',
'externalIpAddress': 'r',
'limits': 'r',
'publicEndpoints': 'r',
'nodePoolId': 'r',
'nodePlan': 'r',
'nodeName': 'r',
'requested': 'r',
'clusterId': 'cr',
'etcd': 'cr',
'controlPlane': 'cr',
'worker': 'cr',
'requestedHostname': 'cr',
'volumesAttached': 'r',
'nodeTemplateId': 'cr',
'volumesInUse': 'r',
'podCidr': 'r',
'podCidrs': 'r',
'name': 'cru',
'taints': 'ru',
'unschedulable': 'r',
'providerId': 'r',
'sshUser': 'r',
'imported': 'cru',
'dockerInfo': 'r',
'scaledownTime': 'cru'
}
for name in cclient.schema.types['node'].resourceFields.keys():
if name.endswith("Config"):
fields[name] = 'cr'
fields['customConfig'] = 'cru'
auth_check(cclient.schema, 'node', 'crud', fields)
def test_node_template_delete(admin_mc, remove_resource):
"""Test deleting a nodeTemplate that is in use by a nodePool.
The nodeTemplate should not be deleted while in use, after the nodePool is
removed, the nodes referencing the nodeTemplate will be deleted
and the nodeTemplate should delete
"""
client = admin_mc.client
node_template, cloud_credential = create_node_template(client)
node_pool = client.create_node_pool(
nodeTemplateId=node_template.id,
hostnamePrefix="test1",
clusterId="local")
# node_pool needs to come first or the API will stop the delete if the
# template still exists
remove_resource(node_pool)
remove_resource(node_template)
assert node_pool.nodeTemplateId == node_template.id
def _wait_for_no_remove_link():
nt = client.reload(node_template)
if not hasattr(nt.links, "remove"):
return True
return False
wait_for(_wait_for_no_remove_link)
# Attempting to delete the template should raise an ApiError
with pytest.raises(ApiError) as e:
client.delete(node_template)
assert e.value.error.status == 405
client.delete(node_pool)
def _node_pool_reload():
np = client.reload(node_pool)
return np is None
wait_for(_node_pool_reload)
def _wait_for_remove_link():
nt = client.reload(node_template)
if hasattr(nt.links, "remove"):
return True
return False
wait_for(_wait_for_remove_link)
# NodePool and Nodes are gone, template should delete
client.delete(node_template)
node_template = client.reload(node_template)
assert node_template is None
def test_cloud_credential_delete(admin_mc, remove_resource):
"""Test deleting a cloud credential that is referenced by nodeTemplate, which
is in use by nodePool
"""
client = admin_mc.client
node_template, cloud_credential = create_node_template(client)
node_pool = client.create_node_pool(
nodeTemplateId=node_template.id,
hostnamePrefix="test1",
clusterId="local")
assert node_pool.nodeTemplateId == node_template.id
wait_for_node_template(client, node_template.id)
# Attempting to delete the template should raise an ApiError
with pytest.raises(ApiError) as e:
client.delete(cloud_credential)
assert e.value.error.status == 405
def test_writing_config_to_disk(admin_mc, wait_remove_resource):
"""Test that userdata and other fields from node driver configs are being
written to disk as expected.
"""
client = admin_mc.client
tempdir = tempfile.gettempdir()
cloud_credential = client.create_cloud_credential(
digitaloceancredentialConfig={"accessToken": "test"})
wait_remove_resource(cloud_credential)
data = {'userdata': 'do cool stuff' + random_str() + '\n',
# This validates ssh keys don't drop the ending \n
'id_rsa': 'some\nfake\nstuff\n' + random_str() + '\n'
}
def _node_template():
try:
return client.create_node_template(
digitaloceanConfig={
'userdata': data['userdata'],
'sshKeyContents': data['id_rsa']
},
name=random_str(),
cloudCredentialId=cloud_credential.id)
except ApiError:
return False
node_template = wait_for(_node_template,
fail_handler=lambda:
'failed to create node template')
wait_remove_resource(node_template)
node_pool = client.create_node_pool(
nodeTemplateId=node_template.id,
hostnamePrefix="test1",
clusterId="local")
def node_available():
node = client.list_node(nodePoolId=node_pool.id)
if len(node.data):
return node.data[0]
return None
node = wait_for(node_available)
wait_for_condition("Saved", "False", client, node)
wait_remove_resource(node_pool)
for key, value in data.items():
dir_name = string_to_encoding(value)
full_path = os.path.join(tempdir, dir_name, key)
def file_exists():
try:
os.stat(full_path)
return True
except FileNotFoundError:
return False
wait_for(file_exists, timeout=120,
fail_handler=lambda: 'file is missing from disk')
with open(full_path, 'r') as f:
contents = f.read()
assert contents == value
def test_node_driver_schema(admin_mc):
"""Test node driver schemas have path fields removed."""
drivers = ['amazonec2config', 'digitaloceanconfig', 'azureconfig']
bad_fields = ['sshKeypath', 'sshKeyPath', 'existingKeyPath']
client = admin_mc.client
for driver in drivers:
schema = client.schema.types[driver]
for field in bad_fields:
assert field not in schema.resourceFields, \
'Driver {} has field {}'.format(driver, field)
def test_amazon_node_driver_schema(admin_mc):
"""Test amazon node driver schema supports AWS-specific resource fields"""
required_fields = ['encryptEbsVolume']
client = admin_mc.client
schema = client.schema.types['amazonec2config']
for field in required_fields:
assert field in schema.resourceFields, \
'amazonec2config missing support for field {}'.format(field)
def create_node_template(client, clientId="test"):
cloud_credential = client.create_cloud_credential(
azurecredentialConfig={"clientId": clientId,
"subscriptionId": "test",
"clientSecret": "test"})
wait_for_cloud_credential(client, cloud_credential.id)
node_template = client.create_node_template(
azureConfig={},
cloudCredentialId=cloud_credential.id)
assert node_template.cloudCredentialId == cloud_credential.id
return node_template, cloud_credential
def wait_for_cloud_credential(client, cloud_credential_id, timeout=60):
start = time.time()
interval = 0.5
creds = client.list_cloud_credential()
cred = None
for val in creds:
if val["id"] == cloud_credential_id:
cred = val
while cred is None:
if time.time() - start > timeout:
print(cred)
raise Exception('Timeout waiting for cloud credential')
time.sleep(interval)
interval *= 2
creds = client.list_cloud_credential()
for val in creds:
if val["id"] == cloud_credential_id:
cred = val
return cred
def wait_for_node_template(client, node_template_id, timeout=60):
start = time.time()
interval = 0.5
template = None
while template is None:
if time.time() - start > timeout:
raise Exception('Timeout waiting for node template lister')
time.sleep(interval)
interval *= 2
nodeTemplates = client.list_node_template()
for each_template in nodeTemplates:
if each_template["id"] == node_template_id:
template = each_template
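# The wait helpers above share a poll-with-exponential-backoff shape; a
# generic sketch of that pattern (hedged: _poll_until is a hypothetical
# helper, not part of this suite):
def _poll_until(predicate, timeout=60):
    start = time.time()
    interval = 0.5
    while not predicate():
        if time.time() - start > timeout:
            raise Exception('Timeout waiting for condition')
        time.sleep(interval)
        interval *= 2
    return True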
def test_user_access_to_other_template(user_factory, remove_resource):
"""Asserts that a normal user's nodepool cannot reference another user's
nodetemplate"""
user1_client = user_factory().client
user2_client = user_factory().client
user2_node_template = user2_client.create_node_template(name="nt-" +
random_str(),
azureConfig={})
remove_resource(user2_node_template)
wait_for_node_template(user2_client, user2_node_template.id)
with pytest.raises(ApiError) as e:
user1_client.create_node_pool(
nodeTemplateId=user2_node_template.id,
hostnamePrefix="test1",
clusterId="local")
assert e.value.error.status == 404
assert e.value.error.message == \
"unable to find node template [%s]" % user2_node_template.id
@pytest.mark.skip(reason="flaky, todo in 27885")
def test_user_cluster_owner_access_to_pool(admin_mc,
user_factory,
remove_resource,
wait_remove_resource):
"""Test that a cluster created by the admin is accessible by another user
added as a cluster-owner, validate nodepool changing and switching
nodetemplate"""
# make an admin and user client
admin_client = admin_mc.client
k8sclient = CoreV1Api(admin_mc.k8s_client)
user = user_factory()
# make a cluster
cluster = admin_client.create_cluster(
name=random_str(),
rancherKubernetesEngineConfig={
"accessKey": "junk"
}
)
remove_resource(cluster)
# wait for the namespace created by the cluster
def _check_namespace(cluster):
for n in k8sclient.list_namespace().items:
if n.metadata.name == cluster.id:
return True
return False
wait_for(lambda: _check_namespace(cluster))
# add user as cluster-owner to the cluster
crtb = admin_client.create_cluster_role_template_binding(
userId=user.user.id,
roleTemplateId="cluster-owner",
clusterId=cluster.id,
)
remove_resource(crtb)
# admin creates a node template and assigns to a pool
admin_node_template, admin_cloud_credential = create_node_template(
admin_client, "admincloudcred-" + random_str())
admin_pool = admin_client.create_node_pool(
nodeTemplateId=admin_node_template.id,
hostnamePrefix="test",
clusterId=cluster.id)
wait_remove_resource(admin_pool)
remove_resource(admin_cloud_credential)
remove_resource(admin_node_template)
# create a template for the user to try and assign
user_node_template, user_cloud_credential = create_node_template(
user.client, "usercloudcred-" + random_str())
remove_resource(user_cloud_credential)
remove_resource(user_node_template)
# will pass, cluster owner user can change pool quantity
user.client.update(admin_pool, quantity=2)
# will pass, can set to a template owned by the user
user.client.update(admin_pool, nodeTemplateId=user_node_template.id)
# will fail, can not update nodepool template,
# if no access to the original template
with pytest.raises(ApiError) as e:
user.client.update(admin_pool, nodeTemplateId=admin_node_template.id)
assert e.value.error.status == 404
assert e.value.error.message == "unable to find node template [%s]" % \
admin_node_template.id
# delete this by hand and the rest will cleanup
admin_client.delete(admin_pool)
def test_admin_access_to_node_template(admin_mc, list_remove_resource):
"""Asserts that an admin user's nodepool can reference
nodetemplates they have created"""
admin_client = admin_mc.client
admin_node_template = admin_client.create_node_template(name="nt-" +
random_str(),
azureConfig={})
remove_list = [admin_node_template]
list_remove_resource(remove_list)
# Admin has access to create nodepool and nodepool create only happens
# after it passes validation.
node_pool = admin_client.create_node_pool(
nodeTemplateId=admin_node_template.id,
hostnamePrefix="test1",
clusterId="local")
remove_list.insert(0, node_pool)
def test_user_access_to_node_template(user_mc, remove_resource):
"""Asserts that a normal user's nodepool can reference
nodetemplates they have created"""
user_client = user_mc.client
user_node_template = user_client.create_node_template(name="nt-" +
random_str(),
azureConfig={})
remove_resource(user_node_template)
wait_for_node_template(user_client, user_node_template.id)
with pytest.raises(ApiError) as e:
user_client.create_node_pool(
nodeTemplateId=user_node_template.id,
hostnamePrefix="test1",
clusterId="local")
# User does not have access to create nodepools but has
# access to nodetemplate. Nodepool create happens after
# validation has passed.
assert e.value.error.status == 403
assert 'cannot create resource "nodepools"' in e.value.error.message
def test_admin_access_user_template(admin_mc, user_mc, list_remove_resource):
"""Asserts that an admin user's nodepool can reference another user's
nodetemplates"""
admin_client = admin_mc.client
user_client = user_mc.client
user_node_template = user_client.create_node_template(name="nt-" +
random_str(),
azureConfig={})
remove_list = [user_node_template]
list_remove_resource(remove_list)
# Admin has access to create nodepool and nodepool create only happens
# after it passes validation.
node_pool = admin_client.create_node_pool(
nodeTemplateId=user_node_template.id,
hostnamePrefix="test1",
clusterId="local")
remove_list.insert(0, node_pool)
def test_no_node_template(user_mc):
"""Asserts that a nodepool cannot create without a valid
nodetemplate"""
user_client = user_mc.client
invalid_template_id = "thisinsnotatemplateid"
with pytest.raises(ApiError) as e:
user_client.create_node_pool(
nodeTemplateId=invalid_template_id,
hostnamePrefix="test1",
clusterId="local")
assert e.value.error.status == 404
assert e.value.error.message == \
"unable to find node template [%s]" % invalid_template_id
| 15,616 | 33.550885 | 81 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_logging.py
|
import pytest
from rancher import ApiError
from .common import random_str
def test_logging_test_action(admin_mc, admin_pc, user_mc, remove_resource):
"""Tests that a user with read-only access is not
able to perform a logging test.
"""
prtb = admin_mc.client.create_project_role_template_binding(
name="prtb-" + random_str(),
userId=user_mc.user.id,
projectId=admin_pc.project.id,
roleTemplateId="read-only")
remove_resource(prtb)
# use logEndpoint from admin client to get action not available to user
logEndpoint = admin_mc.client.list_clusterLogging()
with pytest.raises(ApiError) as e:
user_mc.client.action(
obj=logEndpoint,
action_name="test",
syslog={"config": {"endpoint": "0.0.0.0:8080"}}
)
assert e.value.error.status == 404
| 858 | 32.038462 | 75 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_password_store.py
|
from kubernetes.client import CustomObjectsApi
from kubernetes.client import CoreV1Api
from kubernetes.client.rest import ApiException
from .common import random_str
import base64
group = 'management.cattle.io'
version = 'v3'
namespace = 'local'
plural = 'clusterloggings'
clusterId = "local"
globalNS = "cattle-global-data"
def test_cluster_logging_elasticsearch(admin_mc, remove_resource):
client = admin_mc.client
secretPassword = random_str()
indexPrefix = "prefix"
endpoint = "https://localhost:8443/"
name = random_str()
es = client.create_cluster_logging(
name=name,
clusterId=clusterId,
elasticsearchConfig={
'authPassword': secretPassword,
'endpoint': endpoint,
'indexPrefix': indexPrefix})
remove_resource(es)
# Test password not present in api
assert es is not None
assert es['elasticsearchConfig'].get('authPassword') is None
crdClient, k8sclient = getClients(admin_mc)
ns, name = es["id"].split(":")
# Test password is in k8s secret after creation
verifyPassword(crdClient, k8sclient, ns, name, secretPassword)
# Test noop, password field should be as it is
es = client.update(es, elasticsearchConfig=es['elasticsearchConfig'])
verifyPassword(crdClient, k8sclient, ns, name, secretPassword)
# Test updating password
newSecretPassword = random_str()
es = client.update(es, elasticsearchConfig={
'endpoint': endpoint,
'authPassword': newSecretPassword})
verifyPassword(crdClient, k8sclient, ns, name, newSecretPassword)
# Test secret doesn't exist after object deletion
checkSecret(crdClient, k8sclient, ns, name, es, client, deleteFunc)
def test_cluster_logging_fluentd(admin_mc, remove_resource):
client = admin_mc.client
fluentdservers = getFluentdServers()
name = random_str()
fs = client.create_cluster_logging(
name=name,
clusterId=clusterId,
fluentForwarderConfig={
'compress': "true",
'enableTls': "false",
'fluentServers': fluentdservers})
remove_resource(fs)
assert fs is not None
servers = fs['fluentForwarderConfig'].get('fluentServers')
assert len(servers) == 3
# Test password not present in api
for server in servers:
assert server.get('password') is None
crdClient, k8sclient = getClients(admin_mc)
ns, name = fs['id'].split(":")
# Test password is in k8s secret after creation
verifyPasswords(crdClient, k8sclient, ns, name, fluentdservers)
# Test noop, password field should be as it is
fs = client.update(fs, fluentForwarderConfig=fs['fluentForwarderConfig'])
verifyPasswords(crdClient, k8sclient, ns, name, fluentdservers)
# Test updating password of one of the entries, no password passed in rest
newSecretPassword = random_str()
fs['fluentForwarderConfig'].\
get('fluentServers')[2].password = newSecretPassword
fluentdservers[2]['password'] = newSecretPassword
fs = client.update(fs, fluentForwarderConfig=fs['fluentForwarderConfig'])
verifyPasswords(crdClient, k8sclient, ns, name, fluentdservers)
# Change array order (delete middle entry from array)
servers = fs['fluentForwarderConfig'].get('fluentServers')
del servers[1]
del fluentdservers[1]
config = {'fluentServers': servers}
fs = client.update(fs, fluentForwarderConfig=config)
verifyPasswords(crdClient, k8sclient, ns, name, fluentdservers)
# Test secrets doesn't exist after object deletion
checkSecrets(crdClient, k8sclient, ns, name, fs, client, deleteFunc)
def verifyPassword(crdClient, k8sclient, ns, name, secretPassword):
k8es = crdClient.get_namespaced_custom_object(
group, version, namespace, plural, name)
secretName = k8es['spec']['elasticsearchConfig']['authPassword']
ns, name = secretName.split(":")
assert ns is not None
assert name is not None
secret = k8sclient.read_namespaced_secret(name, ns)
assert base64.b64decode(secret.data[name]).\
decode("utf-8") == secretPassword
def verifyPasswords(crdClient, k8sclient, ns, name, fluentdServers):
k8fs = crdClient.get_namespaced_custom_object(
group, version, namespace, plural, name)
servers = k8fs['spec']['fluentForwarderConfig']['fluentServers']
for ind, server in enumerate(fluentdServers):
secretName = servers[ind]['password']
ns, name = secretName.split(":")
assert ns is not None
assert name is not None
secret = k8sclient.read_namespaced_secret(name, ns)
assert base64.b64decode(secret.data[name]).\
decode("utf-8") == server['password']
def checkSecret(crdClient, k8sclient, ns, name, es, client, func):
k8es = crdClient.get_namespaced_custom_object(
group, version, namespace, plural, name)
secretName = k8es['spec']['elasticsearchConfig']['authPassword']
ns, name = secretName.split(":")
func(client, es)
try:
k8sclient.read_namespaced_secret(name, ns)
except ApiException as e:
assert e.status == 404
def checkSecrets(crdClient, k8sclient, ns, name, fs, client, func):
k8fs = crdClient.get_namespaced_custom_object(
group, version, namespace, plural, name)
servers = k8fs['spec']['fluentForwarderConfig']['fluentServers']
secretNames = []
for ind, server in enumerate(servers):
secretName = server['password']
ns, name = secretName.split(":")
secretNames.append(name)
func(client, fs)
    for secretName in secretNames:
        try:
            k8sclient.read_namespaced_secret(secretName, globalNS)
        except ApiException as e:
            assert e.status == 404
def getClients(admin_mc):
return CustomObjectsApi(admin_mc.k8s_client), \
CoreV1Api(admin_mc.k8s_client)
def test_cluster_logging_null(admin_mc, remove_resource):
client = admin_mc.client
secretPassword = random_str()
indexPrefix = "prefix"
endpoint = "https://localhost:8443/"
name = random_str()
crdClient, k8sclient = getClients(admin_mc)
es = client.create_cluster_logging(
name=name,
clusterId=clusterId,
elasticsearchConfig={
'authPassword': secretPassword,
'endpoint': endpoint,
'indexPrefix': indexPrefix})
remove_resource(es)
ns, name = es['id'].split(":")
checkSecret(crdClient, k8sclient, ns, name, es, client, upFuncElastic)
fluentdservers = getFluentdServers()
name = random_str()
fs = client.create_cluster_logging(
name=name,
clusterId=clusterId,
fluentForwarderConfig={
'compress': "true",
'enableTls': "false",
'fluentServers': fluentdservers})
remove_resource(fs)
ns, name = fs['id'].split(":")
checkSecrets(crdClient, k8sclient, ns, name, fs, client, upFuncFluentd)
def upFuncFluentd(client, fs):
try:
fs = client.update(fs, fluentForwarderConfig=None)
except ApiException as e:
assert e is None
def upFuncElastic(client, es):
try:
es = client.update(es, elasticsearchConfig=None)
except ApiException as e:
assert e is None
def deleteFunc(client, obj):
client.delete(obj)
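# Note: checkSecret/checkSecrets take the mutation as `func` so the same
# "secret is gone afterwards" assertion covers both deletion (deleteFunc)
# and nulling the config section (upFuncElastic / upFuncFluentd).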
def getFluentdServers():
return [{
"endpoint": "192.168.1.10:87",
"standby": False,
"username": random_str(),
"weight": 100,
"password": random_str()
},
{
"endpoint": "192.168.1.10:89",
"standby": False,
"username": random_str(),
"weight": 100,
"password": random_str()
},
{
"endpoint": "192.168.2.10:86",
"standby": False,
"username": random_str(),
"weight": 100,
"password": random_str()
}]
rancher | rancher-master/tests/integration/suite/test_istio.py
import os
import pytest
import subprocess
from .common import random_str
from .conftest import cluster_and_client, ClusterContext
kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"k8s_kube_config")
istio_crd_url = "https://raw.githubusercontent.com/istio/istio/1.1.5" \
"/install/kubernetes/helm/istio-init/files/crd-10.yaml"
@pytest.mark.nonparallel
def test_virtual_service(admin_pc):
client = admin_pc.client
ns = admin_pc.cluster.client.create_namespace(
name=random_str(),
projectId=admin_pc.project.id)
name = random_str()
client.create_virtualService(
name=name,
namespaceId=ns.id,
hosts=["test"],
http=[{
"route": [
{
"destination": {
"host": "test",
"subset": "v1"
}
}
]
}],
)
virtualServices = client.list_virtualService(
namespaceId=ns.id
)
assert len(virtualServices) == 1
client.delete(virtualServices.data[0])
client.delete(ns)
@pytest.mark.nonparallel
def test_destination_rule(admin_pc):
client = admin_pc.client
ns = admin_pc.cluster.client.create_namespace(
name=random_str(),
projectId=admin_pc.project.id)
name = random_str()
client.create_destinationRule(
name=name,
namespaceId=ns.id,
host="test",
subsets=[{
"name": "v1",
"labels": {
"version": "v1",
}
}],
)
destinationRules = client.list_destinationRule(
namespaceId=ns.id
)
assert len(destinationRules) == 1
client.delete(destinationRules.data[0])
client.delete(ns)
# consistentHash has a "oneOf" only openAPI validation on it,
# and our types were passing multiple options which failed.
# This test ensures you can pass a single option.
# See: https://github.com/rancher/rancher/issues/25515
@pytest.mark.nonparallel
def test_destination_rule_on_cookie(admin_pc, remove_resource):
client = admin_pc.client
ns = admin_pc.cluster.client.create_namespace(
name=random_str(),
projectId=admin_pc.project.id)
remove_resource(ns)
name = random_str()
cookie_name = name + "_cookie"
dr = client.create_destinationRule(
name=name,
namespaceId=ns.id,
host="test",
subsets=[{
"name": "v1",
"labels": {
"version": "v1",
}
}],
trafficPolicy={
"loadBalancer": {
"consistentHash": {
"httpCookie": {
"ttl": "0s",
"name": cookie_name,
}
}
}
}
)
remove_resource(dr)
destinationRules = client.list_destinationRule(
namespaceId=ns.id
)
assert len(destinationRules) == 1
assert destinationRules.data[0].trafficPolicy.loadBalancer\
.consistentHash.httpCookie.name == cookie_name
@pytest.mark.nonparallel
def test_gateway(admin_pc):
client = admin_pc.client
ns = admin_pc.cluster.client.create_namespace(
name=random_str(),
projectId=admin_pc.project.id)
name = random_str()
client.create_gateway(
name=name,
namespaceId=ns.id,
servers=[{
"hosts": [
"*",
],
"port": {
"number": 443,
"name": "https",
"protocol": "HTTPS",
},
"tls": {
"mode": "SIMPLE",
"serverCertificate": "/etc/certs/server.pem",
"privateKey": "/etc/certs/privatekey.pem",
}
}],
)
gateways = client.list_gateway(
namespaceId=ns.id
)
assert len(gateways) == 1
client.delete(gateways.data[0])
client.delete(ns)
@pytest.fixture(scope='module', autouse=True)
def install_crd(admin_mc):
cluster, client = cluster_and_client('local', admin_mc.client)
cc = ClusterContext(admin_mc, cluster, client)
create_kubeconfig(cc.cluster)
try:
return subprocess.check_output(
'kubectl apply ' +
' --kubeconfig ' + kube_fname +
' -f ' + istio_crd_url,
stderr=subprocess.STDOUT, shell=True,
)
except subprocess.CalledProcessError as err:
print('kubectl error: ' + str(err.output))
raise err
def teardown_module(module):
try:
return subprocess.check_output(
'kubectl delete ' +
' --kubeconfig ' + kube_fname +
' -f ' + istio_crd_url,
stderr=subprocess.STDOUT, shell=True,
)
except subprocess.CalledProcessError as err:
print('kubectl error: ' + str(err.output))
raise err
def create_kubeconfig(cluster):
generateKubeConfigOutput = cluster.generateKubeconfig()
print(generateKubeConfigOutput.config)
    with open(kube_fname, "w") as f:
        f.write(generateKubeConfigOutput.config)
rancher | rancher-master/tests/integration/suite/test_namespaced_secrets.py
from .common import random_str
from .test_secrets import CERT, KEY
UPDATED_CERT = """-----BEGIN CERTIFICATE-----
MIIDEDCCAfgCCQC+HwE8rpMN7jANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJV
UzEQMA4GA1UECBMHQXJpem9uYTEVMBMGA1UEChMMUmFuY2hlciBMYWJzMRIwEAYD
VQQDEwlsb2NhbGhvc3QwHhcNMTYwNjMwMDExMzMyWhcNMjYwNjI4MDExMzMyWjBK
MQswCQYDVQQGEwJVUzEQMA4GA1UECBMHQXJpem9uYTEVMBMGA1UEChMMUmFuY2hl
ciBMYWJzMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IB
DwAwggEKAoIBAQC1PR0EiJjM0wbFQmU/yKSb7AuQdzhdW02ya+RQe+31/B+sOTMr
z9b473KCKf8LiFKFOIQUhR5fPvwyrrIWKCEV9pCp/wM474fX32j0zYaH6ezZjL0r
L6hTeGFScGse3dk7ej2+6nNWexpujos0djFi9Gu11iVHIJyT2Sx66kPPPZVRkJO9
5Pfetm5SLIQtJHUwy5iWv5Br+AbdXlUAjTYUqS4mhKIIbblAPbOKrYRxGXX/6oDV
J5OGLle8Uvlb8poxqmy67FPyMObNHhjggKwboXhmNuuT2OGf/VeZANMYubs4JP2V
ZLs3U/1tFMAOaQM+PbT9JuwMSmGYFX0Qiuh/AgMBAAEwDQYJKoZIhvcNAQEFBQAD
ggEBACpkRCQpCn/zmTOwboBckkOFeqMVo9cvSu0Sez6EPED4WUv/6q5tlJeHekQm
6YVcsXeOMkpfZ7qtGmBDwR+ly7D43dCiPKplm0uApO1CkogG5ePv0agvKHEybd36
xu9pt0fnxDdrP2NrP6trHq1D+CzPZooLRfmYqbt1xmIb00GpnyiJIUNuMu7GUM3q
NxWGK3eq+1cyt6xr8nLOC5zaGeSyZikw4+9vqLudNSyYdnw9mdHtrYT0GlcEP1Vc
NK+yrhDCvEWH6+4+pp8Ve2P2Le5tvbA1m24AxyuC9wHS5bUmiNHweLXNpxLFTjK8
BBUi6y1Vm9jrDi/LiiHcN4sJEoP=
-----END CERTIFICATE-----"""
def test_namespaced_secrets(admin_pc, admin_cc_client):
client = admin_pc.client
ns = admin_cc_client.create_namespace(name=random_str(),
projectId=admin_pc.project.id)
name = random_str()
secret = client.create_namespaced_secret(name=name, namespaceId=ns.id,
stringData={
'foo': 'bar'
})
assert secret.baseType == 'namespacedSecret'
assert secret.type == 'namespacedSecret'
assert secret.kind == 'Opaque'
assert secret.name == name
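    # stringData {'foo': 'bar'} comes back base64-encoded: 'YmFy' == b64('bar')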
assert secret.data.foo == 'YmFy'
secret.data.baz = 'YmFy'
secret = client.update(secret, data=secret.data)
assert secret is not None
secret = client.reload(secret)
assert secret.baseType == 'namespacedSecret'
assert secret.type == 'namespacedSecret'
assert secret.kind == 'Opaque'
assert secret.name == name
assert secret.data.foo == 'YmFy'
assert secret.data.baz == 'YmFy'
assert secret.namespaceId == ns.id
assert 'namespace' not in secret.data
assert secret.projectId == admin_pc.project.id
found = False
for i in client.list_namespaced_secret():
if i.id == secret.id:
found = True
break
assert found
client.delete(secret)
def test_namespaced_certificates(admin_pc, admin_cc_client):
client = admin_pc.client
ns = admin_cc_client.create_namespace(name=random_str(),
projectId=admin_pc.project.id)
name = random_str()
cert = client.create_namespaced_certificate(name=name, key=KEY,
namespaceId=ns.id,
certs=CERT)
assert cert.baseType == 'namespacedSecret'
assert cert.type == 'namespacedCertificate'
assert cert.name == name
assert cert.certs == CERT
assert cert.namespaceId == ns.id
assert cert.projectId == admin_pc.project.id
assert 'namespace' not in cert
cert = client.update(cert, certs=UPDATED_CERT)
assert cert.namespaceId == ns.id
assert cert.projectId == admin_pc.project.id
cert = client.reload(cert)
assert cert.baseType == 'namespacedSecret'
assert cert.type == 'namespacedCertificate'
assert cert.name == name
assert cert.certs == UPDATED_CERT
assert cert.namespaceId == ns.id
assert cert.projectId == admin_pc.project.id
found = False
for i in client.list_namespaced_certificate():
if i.id == cert.id:
found = True
break
assert found
cert = client.by_id_namespaced_certificate(cert.id)
assert cert is not None
client.delete(cert)
def test_namespaced_docker_credential(admin_pc, admin_cc_client):
client = admin_pc.client
ns = admin_cc_client.create_namespace(name=random_str(),
projectId=admin_pc.project.id)
name = random_str()
registries = {'index.docker.io': {
'username': 'foo',
'password': 'bar',
}}
cert = client.create_namespaced_docker_credential(name=name,
namespaceId=ns.id,
registries=registries)
assert cert.baseType == 'namespacedSecret'
assert cert.type == 'namespacedDockerCredential'
assert cert.name == name
assert cert.registries['index.docker.io'].username == 'foo'
assert 'password' in cert.registries['index.docker.io']
assert cert.namespaceId == ns.id
assert cert.projectId == admin_pc.project.id
registries['two'] = {
'username': 'blah'
}
cert = client.update(cert, registries=registries)
cert = client.reload(cert)
assert cert.baseType == 'namespacedSecret'
assert cert.type == 'namespacedDockerCredential'
assert cert.name == name
assert cert.registries['index.docker.io'].username == 'foo'
assert cert.registries.two.username == 'blah'
assert 'password' not in cert.registries['index.docker.io']
assert cert.namespaceId == ns.id
assert 'namespace' not in cert
assert cert.projectId == admin_pc.project.id
found = False
for i in client.list_namespaced_docker_credential():
if i.id == cert.id:
found = True
break
assert found
cert = client.by_id_namespaced_docker_credential(cert.id)
assert cert is not None
client.delete(cert)
def test_namespaced_basic_auth(admin_pc, admin_cc_client):
client = admin_pc.client
ns = admin_cc_client.create_namespace(name=random_str(),
projectId=admin_pc.project.id)
name = random_str()
cert = client.create_namespaced_basic_auth(name=name,
namespaceId=ns.id,
username='foo',
password='bar')
assert cert.baseType == 'namespacedSecret'
assert cert.type == 'namespacedBasicAuth'
assert cert.name == name
assert cert.username == 'foo'
assert 'password' in cert
assert cert.namespaceId == ns.id
assert 'namespace' not in cert
assert cert.projectId == admin_pc.project.id
cert = client.update(cert, username='foo2')
cert = client.reload(cert)
assert cert.baseType == 'namespacedSecret'
assert cert.type == 'namespacedBasicAuth'
assert cert.name == name
assert cert.username == 'foo2'
assert 'password' not in cert
assert cert.namespaceId == ns.id
assert 'namespace' not in cert
assert cert.projectId == admin_pc.project.id
found = False
for i in client.list_namespaced_basic_auth():
if i.id == cert.id:
found = True
break
assert found
cert = client.by_id_namespaced_basic_auth(cert.id)
assert cert is not None
client.delete(cert)
def test_namespaced_ssh_auth(admin_pc, admin_cc_client):
client = admin_pc.client
ns = admin_cc_client.create_namespace(name=random_str(),
projectId=admin_pc.project.id)
name = random_str()
cert = client.create_namespaced_ssh_auth(name=name,
namespaceId=ns.id,
privateKey='foo')
assert cert.baseType == 'namespacedSecret'
assert cert.type == 'namespacedSshAuth'
assert cert.name == name
assert 'privateKey' in cert
assert cert.namespaceId == ns.id
assert 'namespace' not in cert
assert cert.projectId == admin_pc.project.id
cert = client.update(cert, privateKey='foo2')
cert = client.reload(cert)
assert cert.baseType == 'namespacedSecret'
assert cert.type == 'namespacedSshAuth'
assert cert.name == name
assert 'privateKey' not in cert
assert cert.namespaceId == ns.id
assert 'namespace' not in cert
assert cert.projectId == admin_pc.project.id
found = False
for i in client.list_namespaced_ssh_auth():
if i.id == cert.id:
found = True
break
assert found
cert = client.by_id_namespaced_ssh_auth(cert.id)
assert cert is not None
client.delete(cert)
rancher | rancher-master/tests/integration/suite/test_system_project.py
import pytest
from rancher import ApiError
from kubernetes.client import CoreV1Api
from .conftest import wait_for
systemProjectLabel = "authz.management.cattle.io/system-project"
defaultProjectLabel = "authz.management.cattle.io/default-project"
initial_system_namespaces = set(["kube-node-lease",
"kube-system",
"cattle-system",
"kube-public",
"cattle-global-data",
"cattle-global-nt",
"cattle-fleet-system"])
loggingNamespace = "cattle-logging"
def test_system_project_created(admin_cc):
projects = admin_cc.management.client.list_project(
clusterId=admin_cc.cluster.id)
initial_projects = {}
initial_projects["Default"] = defaultProjectLabel
initial_projects["System"] = systemProjectLabel
required_projects = []
for project in projects:
name = project['name']
if name in initial_projects:
projectLabel = initial_projects[name]
assert project['labels'].\
data_dict()[projectLabel] == 'true'
required_projects.append(name)
assert len(required_projects) == len(initial_projects)
def test_system_namespaces_assigned(admin_cc):
projects = admin_cc.management.client.list_project(
clusterId=admin_cc.cluster.id)
systemProject = None
for project in projects:
if project['name'] == "System":
systemProject = project
break
assert systemProject is not None
system_namespaces = admin_cc.client.list_namespace(
projectId=systemProject.id)
system_namespaces_names = set(
[ns['name'] for ns in system_namespaces])
    # If clusterLogging tests ran before this one, cattle-logging may be
    # present in the current system namespaces, so remove it
if loggingNamespace in system_namespaces_names:
system_namespaces_names.remove(loggingNamespace)
assert initial_system_namespaces.issubset(system_namespaces_names)
def test_system_project_cant_be_deleted(admin_mc, admin_cc):
"""The system project is not allowed to be deleted, test to ensure that is
true
"""
projects = admin_cc.management.client.list_project(
clusterId=admin_cc.cluster.id)
system_project = None
for project in projects:
if project['name'] == "System":
system_project = project
break
assert system_project is not None
    # Attempting to delete the system project should raise an ApiError
with pytest.raises(ApiError) as e:
admin_mc.client.delete(system_project)
assert e.value.error.status == 405
assert e.value.error.message == 'System Project cannot be deleted'
def test_system_namespaces_default_svc_account(admin_mc):
system_namespaces_setting = admin_mc.client.by_id_setting(
"system-namespaces")
system_namespaces = system_namespaces_setting["value"].split(",")
k8sclient = CoreV1Api(admin_mc.k8s_client)
def_saccnts = k8sclient.list_service_account_for_all_namespaces(
field_selector='metadata.name=default')
for sa in def_saccnts.items:
ns = sa.metadata.namespace
def _check_system_sa_flag():
if ns in system_namespaces and ns != "kube-system":
if sa.automount_service_account_token is False:
return True
else:
return False
else:
return True
def _sa_update_fail():
name = sa.metadata.name
flag = sa.automount_service_account_token
            return ('Service account {} in namespace {} does not have the '
                    'correct automount_service_account_token flag: '
                    '{}'.format(name, ns, flag))
wait_for(_check_system_sa_flag, fail_handler=_sa_update_fail)
rancher | rancher-master/tests/integration/suite/test_clustertemplate.py
from .common import random_str, check_subject_in_rb
from rancher import ApiError
from .conftest import wait_until, wait_for, DEFAULT_TIMEOUT
import pytest
import time
import kubernetes
rb_resource = 'rolebinding'
def test_create_cluster_template_with_revision(admin_mc, remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_resource(cluster_template)
templateId = cluster_template.id
_ = \
create_cluster_template_revision(admin_mc.client, templateId)
_ = \
create_cluster_template_revision(admin_mc.client, templateId)
client = admin_mc.client
template_reloaded = client.by_id_cluster_template(cluster_template.id)
assert template_reloaded.links.revisions is not None
def test_create_template_revision_k8s_translation(admin_mc, remove_resource):
cluster_template = create_cluster_template(admin_mc,
[], admin_mc)
remove_resource(cluster_template)
tId = cluster_template.id
client = admin_mc.client
cconfig = {
"rancherKubernetesEngineConfig": {
"kubernetesVersion": "1.15"
}
}
with pytest.raises(ApiError) as e:
client.create_cluster_template_revision(clusterConfig=cconfig,
clusterTemplateId=tId,
enabled="true")
assert e.value.error.status == 422
# template k8s question needed if using generic version
cconfig = {
"rancherKubernetesEngineConfig": {
"kubernetesVersion": "1.15.x"
}
}
questions = [{
"variable": "dockerRootDir",
"required": "false",
"type": "string",
"default": "/var/lib/docker"
}]
with pytest.raises(ApiError) as e:
client.create_cluster_template_revision(name=random_str(),
clusterConfig=cconfig,
clusterTemplateId=tId,
questions=questions,
enabled="true")
assert e.value.error.status == 422
def test_default_pod_sec(admin_mc, list_remove_resource):
cluster_template = create_cluster_template(admin_mc,
[], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
tId = cluster_template.id
client = admin_mc.client
cconfig = {
"rancherKubernetesEngineConfig": {
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
}
}
},
"defaultPodSecurityPolicyTemplateId": "restricted",
}
rev = client.create_cluster_template_revision(name=random_str(),
clusterConfig=cconfig,
clusterTemplateId=tId,
enabled="true")
time.sleep(2)
cluster = wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=rev.id)
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
assert cluster.defaultPodSecurityPolicyTemplateId == "restricted"
client.delete(cluster)
wait_for_cluster_to_be_deleted(client, cluster.id)
def test_check_default_revision(admin_mc, remove_resource):
cluster_template = create_cluster_template(admin_mc,
[], admin_mc)
remove_resource(cluster_template)
templateId = cluster_template.id
first_revision = \
create_cluster_template_revision(admin_mc.client, templateId)
client = admin_mc.client
wait_for_default_revision(client, templateId, first_revision.id)
# delete the cluster template revision, it should error out
with pytest.raises(ApiError) as e:
client.delete(first_revision)
assert e.value.error.status == 403
def test_create_cluster_with_template(admin_mc, list_remove_resource):
cluster_template = create_cluster_template(admin_mc,
[], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
templateId = cluster_template.id
template_revision = \
create_cluster_template_revision(admin_mc.client, templateId)
# create a cluster with this template
answers = {
"values": {
"dockerRootDir": "/var/lib/docker123",
"rancherKubernetesEngineConfig.ignoreDockerVersion":
"false"
}
}
revId = template_revision.id
client = admin_mc.client
cluster = wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=revId,
description="template from cluster",
answers=answers)
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
assert cluster.questions is not None
k8s_version = cluster.rancherKubernetesEngineConfig.kubernetesVersion
assert k8s_version != "v1.15.x"
# edit cluster should not fail
client.update(cluster, name=random_str(), clusterTemplateRevisionId=revId)
# edit cluster to remove template must fail
with pytest.raises(ApiError) as e:
client.update(cluster, name=random_str(), clusterTemplateId=None,
clusterTemplateRevisionId=None)
assert e.value.error.status == 422
# delete the cluster template, it should error out
with pytest.raises(ApiError) as e:
client.delete(cluster_template)
assert e.value.error.status == 422
client.delete(cluster)
wait_for_cluster_to_be_deleted(client, cluster.id)
def test_create_cluster_validations(admin_mc, remove_resource):
cluster_template = create_cluster_template(admin_mc,
[], admin_mc)
remove_resource(cluster_template)
templateId = cluster_template.id
template_revision = \
create_cluster_template_revision(admin_mc.client, templateId)
# create a cluster with this template
revId = template_revision.id
client = admin_mc.client
rConfig = getRKEConfig()
try:
wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=revId,
description="template from cluster",
rancherKubernetesEngineConfig=rConfig)
except ApiError as e:
assert e.error.status == 500
@pytest.mark.nonparallel
def test_create_cluster_template_with_members(admin_mc, remove_resource,
user_factory):
client = admin_mc.client
user_member = user_factory()
remove_resource(user_member)
user_not_member = user_factory()
remove_resource(user_not_member)
members = [{"userPrincipalId": "local://" + user_member.user.id,
"accessType": "read-only"}]
cluster_template = create_cluster_template(admin_mc, members, admin_mc)
remove_resource(cluster_template)
time.sleep(30)
# check who has access to the cluster template
# admin and user_member should be able to list it
id = cluster_template.id
ct = client.by_id_cluster_template(id)
assert ct is not None
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
split = cluster_template.id.split(":")
name = split[1]
rb_name = name + "-ct-r"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_member.user.id, rb_name),
timeout=60,
fail_handler=lambda: "failed to check rolebinding")
um_client = user_member.client
ct = um_client.by_id_cluster_template(id)
assert ct is not None
# user not added as member shouldn't be able to access
unm_client = user_not_member.client
try:
unm_client.by_id_cluster_template(id)
except ApiError as e:
assert e.error.status == 403
# add * as member to share with all
new_members = [{"userPrincipalId": "local://" + user_member.user.id,
"accessType": "read-only"}, {"groupPrincipalId": "*",
"accessType": "read-only"}]
client.update(ct, members=new_members)
split = cluster_template.id.split(":")
name = split[1]
rb_name = name + "-ct-r"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
'system:authenticated', rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
time.sleep(30)
ct = user_not_member.client.by_id_cluster_template(id)
assert ct is not None
def test_creation_standard_user(admin_mc, remove_resource, user_factory):
user_member = user_factory()
remove_resource(user_member)
um_client = user_member.client
with pytest.raises(ApiError) as e:
um_client.create_cluster_template(name="user template",
description="user template")
assert e.value.error.status == 403
@pytest.mark.nonparallel
def test_check_enforcement(admin_mc, remove_resource,
list_remove_resource, user_factory):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
templateId = cluster_template.id
rev = \
create_cluster_template_revision(admin_mc.client, templateId)
client = admin_mc.client
# turn on the enforcement
client.update_by_id_setting(id='cluster-template-enforcement',
value="true")
# a globaladmin can create a rke cluster without a template
cluster = client.create_cluster(
name=random_str(), rancherKubernetesEngineConfig={
"accessKey": "asdfsd"})
remove_list.insert(0, cluster)
# a user cannot create an rke cluster without template
user = user_factory()
remove_resource(user)
crtb_owner = client.create_cluster_role_template_binding(
clusterId="local",
roleTemplateId="cluster-owner",
userId=user.user.id)
remove_resource(crtb_owner)
wait_until(rtb_cb(client, crtb_owner))
user_client = user.client
with pytest.raises(ApiError) as e:
user_client.create_cluster(name=random_str(),
rancherKubernetesEngineConfig={
"accessKey": "asdfsd"})
assert e.value.error.status == 422
# a user can create a non-rke cluster without template
cluster2 = user_client.create_cluster(
name=random_str(), amazonElasticContainerServiceConfig={
"accessKey": "asdfsd"})
remove_list.insert(0, cluster2)
# a user can create an rke cluster with a public template
template_reloaded = client.by_id_cluster_template(templateId)
new_members = [{"groupPrincipalId": "*", "accessType": "read-only"}]
client.update(template_reloaded, members=new_members)
cluster3 = wait_for_cluster_create(user_client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster")
remove_list.insert(0, cluster3)
client.update_by_id_setting(id='cluster-template-enforcement',
value="false")
def test_revision_creation_permission(admin_mc, remove_resource,
user_factory):
user_readonly = user_factory()
user_owner = user_factory()
members = [{"userPrincipalId": "local://" + user_readonly.user.id,
"accessType": "read-only"},
{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "owner"}]
cluster_template = create_cluster_template(admin_mc, members, admin_mc)
remove_resource(cluster_template)
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
split = cluster_template.id.split(":")
name = split[1]
rb_name = name + "-ct-r"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_readonly.user.id, rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
rb_name = name + "-ct-a"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_owner.user.id, rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
templateId = cluster_template.id
# user with accessType=owner should be able to create revision
# since a standard user can add revisions to template shared
# with owner access
create_cluster_template_revision(user_owner.client, templateId)
# user with read-only accessType should get Forbidden error
with pytest.raises(ApiError) as e:
create_cluster_template_revision(user_readonly.client, templateId)
assert e.value.error.status == 403
def test_updated_members_revision_access(admin_mc, remove_resource,
user_factory):
# create cluster template without members and a revision
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_resource(cluster_template)
templateId = cluster_template.id
rev = \
create_cluster_template_revision(admin_mc.client, templateId)
# update template to add a user as member
user_member = user_factory()
members = [{"userPrincipalId": "local://" + user_member.user.id,
"accessType": "read-only"}]
admin_mc.client.update(cluster_template, members=members)
# this member should get access to existing revision "rev"
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
split = rev.id.split(":")
name = split[1]
rb_name = name + "-ctr-r"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_member.user.id, rb_name),
timeout=120,
fail_handler=fail_handler(rb_resource))
revision = user_member.client.by_id_cluster_template_revision(rev.id)
assert revision is not None
# remove this user from cluster_template members list
admin_mc.client.update(cluster_template, members=[])
# now this user should not be able to see that revision
try:
user_member.client.by_id_cluster_template_revision(rev.id)
except ApiError as e:
assert e.error.status == 403
def test_permissions_removed_on_downgrading_access(admin_mc, remove_resource,
user_factory):
user_owner = user_factory()
remove_resource(user_owner)
members = [{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "owner"}]
# create cluster template with one member having "member" accessType
cluster_template = create_cluster_template(admin_mc, members, admin_mc)
remove_resource(cluster_template)
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
split = cluster_template.id.split(":")
name = split[1]
rb_name = name + "-ct-a"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_owner.user.id, rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
# user with accessType=owner should be able to update template
# so adding new member by the user_member should be allowed
new_member = user_factory()
remove_resource(new_member)
members = [{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "owner"},
{"userPrincipalId": "local://" + new_member.user.id,
"accessType": "read-only"}]
user_owner.client.update(cluster_template, members=members)
# now change user_owner's accessType to read-only
members = [{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "read-only"},
{"userPrincipalId": "local://" + new_member.user.id,
"accessType": "read-only"}]
admin_mc.client.update(cluster_template, members=members)
rb_name = name + "-ct-r"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_owner.user.id, rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
# user_owner should not be allowed to update cluster template now
# test updating members field by removing new_member
members = [{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "read-only"}]
try:
user_owner.client.update(cluster_template, members=members)
except ApiError as e:
assert e.error.status == 403
def test_required_template_question(admin_mc, remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_resource(cluster_template)
tId = cluster_template.id
client = admin_mc.client
cconfig = {
"rancherKubernetesEngineConfig": {
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
}
}
},
"defaultPodSecurityPolicyTemplateId": "restricted",
}
questions = [{
"variable": "dockerRootDir",
"required": "true",
"type": "string",
"default": ""
},
{
"variable":
"rancherKubernetesEngineConfig.ignoreDockerVersion",
"required": "false",
"type": "boolean",
"default": "true"
}]
rev = client.create_cluster_template_revision(name=random_str(),
clusterConfig=cconfig,
clusterTemplateId=tId,
questions=questions,
enabled="true")
# creating a cluster with this template with no answer should fail
answers = {
"values": {
"rancherKubernetesEngineConfig.ignoreDockerVersion":
"false"
}
}
try:
wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster",
answers=answers)
except ApiError as e:
assert e.error.status == 422
def test_secret_template_answers(admin_mc, remove_resource,
list_remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
tId = cluster_template.id
client = admin_mc.client
cconfig = {
"rancherKubernetesEngineConfig": {
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
}
}
},
"defaultPodSecurityPolicyTemplateId": "restricted",
}
azureClientId = "rancherKubernetesEngineConfig.cloudProvider.\
azureCloudProvider.aadClientId"
azureClientSecret = "rancherKubernetesEngineConfig.cloudProvider.\
azureCloudProvider.aadClientSecret"
questions = [{
"variable": "dockerRootDir",
"required": "true",
"type": "string",
"default": ""
},
{
"variable": azureClientId,
"required": "true",
"type": "string",
"default": "abcdClientId"
},
{
"variable": azureClientSecret,
"required": "true",
"type": "string",
"default": ""
}]
rev = client.create_cluster_template_revision(name=random_str(),
clusterConfig=cconfig,
clusterTemplateId=tId,
questions=questions,
enabled="true")
# creating a cluster with this template
answers = {
"values": {
"dockerRootDir": "/var/lib/docker123",
azureClientId: "abcdClientId",
azureClientSecret: "abcdClientSecret"
}
}
cluster = wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster",
answers=answers)
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
assert cluster.answers.values[azureClientId] is not None
assert azureClientSecret not in cluster.answers.values
client.delete(cluster)
wait_for_cluster_to_be_deleted(client, cluster.id)
def test_member_accesstype_check(admin_mc, user_factory, remove_resource):
client = admin_mc.client
user_readonly = user_factory()
user_owner = user_factory()
members = [{"userPrincipalId": "local://" + user_readonly.user.id,
"accessType": "read-only"},
{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "member"}]
# creation with a member with accessType "member" shouldn't be allowed
try:
cluster_template = create_cluster_template(admin_mc, members, admin_mc)
remove_resource(cluster_template)
except ApiError as e:
assert e.error.status == 422
members = [{"userPrincipalId": "local://" + user_readonly.user.id,
"accessType": "read-only"},
{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "owner"}]
cluster_template = create_cluster_template(admin_mc, members, admin_mc)
remove_resource(cluster_template)
updated_members = \
[{"userPrincipalId": "local://" + user_readonly.user.id,
"accessType": "read-only"},
{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "member"}]
# updating a cluster template to add user with access type "member"
# shouldn't be allowed
try:
client.update(cluster_template, members=updated_members)
except ApiError as e:
assert e.error.status == 422
def test_create_cluster_with_invalid_revision(admin_mc, remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_resource(cluster_template)
tId = cluster_template.id
client = admin_mc.client
# templaterevision with question with invalid format
cconfig = {
"rancherKubernetesEngineConfig": {
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
}
}
},
"defaultPodSecurityPolicyTemplateId": "restricted",
}
questions = [{
"variable": "dockerRootDir",
"required": "true",
"type": "string",
"default": ""
},
{
"default": "map[enabled:true type:localClusterAuthEndpoint]",
"required": "false",
"type": "string",
"variable": "localClusterAuthEndpoint"
}]
rev = client.create_cluster_template_revision(name=random_str(),
clusterConfig=cconfig,
clusterTemplateId=tId,
questions=questions,
enabled="true")
# creating a cluster with this template
try:
wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster")
except ApiError as e:
assert e.error.status == 422
def test_disable_template_revision(admin_mc, list_remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
tId = cluster_template.id
client = admin_mc.client
rev = \
create_cluster_template_revision(admin_mc.client, tId)
# creating a cluster with this template
cluster = wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster")
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
# disable the revision
client.action(obj=rev, action_name="disable")
try:
wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=rev.id)
except ApiError as e:
assert e.error.status == 500
client.delete(cluster)
wait_for_cluster_to_be_deleted(client, cluster.id)
def test_template_delete_by_members(admin_mc, remove_resource,
list_remove_resource, user_factory):
user_owner = user_factory()
members = [{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "owner"}]
cluster_template = create_cluster_template(admin_mc, members, admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
split = cluster_template.id.split(":")
name = split[1]
rb_name = name + "-ct-a"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_owner.user.id, rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
templateId = cluster_template.id
rev = create_cluster_template_revision(user_owner.client, templateId)
cluster = wait_for_cluster_create(admin_mc.client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster")
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
# user with accessType=owner should not be able to delete this
# template since a cluster exists
wait_for_clusterTemplate_update_failure(admin_mc.client, rev)
with pytest.raises(ApiError) as e:
user_owner.client.delete(cluster_template)
assert e.value.error.status == 422
admin_mc.client.delete(cluster)
wait_for_cluster_to_be_deleted(admin_mc.client, cluster.id)
def test_template_access(admin_mc, remove_resource, user_factory):
user = user_factory()
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_resource(cluster_template)
templateId = cluster_template.id
rev = create_cluster_template_revision(admin_mc.client, templateId)
wait_for_clusterTemplate_list_failure(user.client, rev)
with pytest.raises(ApiError) as e:
user.client.create_cluster(name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster")
assert e.value.error.status == 404
def test_save_as_template_action(admin_mc, list_remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
templateId = cluster_template.id
rev = create_cluster_template_revision(admin_mc.client, templateId)
cluster = wait_for_cluster_create(admin_mc.client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster")
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
try:
        admin_mc.client.action(obj=cluster, action_name="saveAsTemplate")
except AttributeError as e:
assert e is not None
def test_cluster_desc_update(admin_mc, list_remove_resource):
cluster_template = create_cluster_template(admin_mc,
[], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
templateId = cluster_template.id
rev = \
create_cluster_template_revision(admin_mc.client, templateId)
# create a cluster with this template
client = admin_mc.client
cname = random_str()
cluster = wait_for_cluster_create(admin_mc.client, name=cname,
clusterTemplateRevisionId=rev.id,
description="template from cluster")
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
assert cluster.description == 'template from cluster'
# edit cluster description
updatedC = client.update(cluster, name=cname,
clusterTemplateRevisionId=rev.id,
description="updated desc")
assert updatedC.description == 'updated desc'
client.delete(cluster)
wait_for_cluster_to_be_deleted(client, cluster.id)
def test_update_cluster_monitoring(admin_mc, list_remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
tId = cluster_template.id
client = admin_mc.client
cconfig = {
"rancherKubernetesEngineConfig": {
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
}
}
},
"enableClusterMonitoring": "true",
"defaultPodSecurityPolicyTemplateId": "restricted",
}
rev1 = client.create_cluster_template_revision(clusterConfig=cconfig,
name="v1",
clusterTemplateId=tId,
enabled="true")
cconfig2 = {
"rancherKubernetesEngineConfig": {
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
}
}
},
"enableClusterMonitoring": "false",
"defaultPodSecurityPolicyTemplateId": "restricted",
}
rev2 = client.create_cluster_template_revision(clusterConfig=cconfig2,
name="v2",
clusterTemplateId=tId,
enabled="true")
cluster_name = random_str()
cluster = wait_for_cluster_create(client, name=cluster_name,
clusterTemplateRevisionId=rev1.id,
description="template from cluster")
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
# update cluster to use rev2 that turns off monitoring
# expect no change to monitoring
client.update(cluster,
name=cluster_name, clusterTemplateRevisionId=rev2.id)
reloaded_cluster = client.by_id_cluster(cluster.id)
assert reloaded_cluster.enableClusterMonitoring is True
client.delete(cluster)
wait_for_cluster_to_be_deleted(client, cluster.id)
def rtb_cb(client, rtb):
"""Wait for the prtb to have the userId populated"""
def cb():
rt = client.reload(rtb)
return rt.userPrincipalId is not None
return cb
def grb_cb(client, grb):
"""Wait for the grb to have the userId populated"""
def cb():
rt = client.reload(grb)
return rt.userId is not None
return cb
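# Illustrative sketch of the same callback-factory pattern as rtb_cb/grb_cb:
# build a zero-argument predicate over a freshly reloaded object so it can
# be handed to wait_until/wait_for. `field` is a hypothetical parameter.
def field_populated_cb(client, obj, field):
    def cb():
        reloaded = client.reload(obj)
        return getattr(reloaded, field, None) is not None
    return cb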
# When calling this function you _must_ remove the cluster_template manually.
# If a cluster is also created, it must be removed before the template,
# since the template cannot be deleted while a cluster still references it.
def create_cluster_template(creator, members, admin_mc):
template_name = random_str()
cluster_template = \
creator.client.create_cluster_template(
name=template_name,
description="demo template",
members=members)
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
rb_name = cluster_template.id.split(":")[1] + "-ct-a"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
creator.user.id, rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
return cluster_template
def create_cluster_template_revision(client, clusterTemplateId):
rke_config = getRKEConfig()
cluster_config = {
"dockerRootDir": "/var/lib/docker",
"enableClusterAlerting": "false",
"enableClusterMonitoring": "false",
"enableNetworkPolicy": "false",
"type": "clusterSpecBase",
"localClusterAuthEndpoint": {
"enabled": "true",
"type": "localClusterAuthEndpoint"
},
"rancherKubernetesEngineConfig": rke_config
}
questions = [{
"variable": "dockerRootDir",
"required": "false",
"type": "string",
"default": "/var/lib/docker"
},
{
"variable":
"rancherKubernetesEngineConfig.ignoreDockerVersion",
"required": "false",
"type": "boolean",
"default": "true"
},
{
"variable":
"rancherKubernetesEngineConfig.kubernetesVersion",
"required": "false",
"type": "string",
"default": "1.19.x"
}]
revision_name = random_str()
cluster_template_revision = \
client.create_cluster_template_revision(
name=revision_name,
clusterConfig=cluster_config,
clusterTemplateId=clusterTemplateId,
disabled="false",
questions=questions
)
return cluster_template_revision
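# Illustrative usage (names as defined above): the two factories are used
# as a pair, e.g.
#   ct = create_cluster_template(admin_mc, [], admin_mc)
#   rev = create_cluster_template_revision(admin_mc.client, ct.id)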
def getRKEConfig():
rke_config = {
"addonJobTimeout": 30,
"ignoreDockerVersion": "true",
"sshAgentAuth": "false",
"type": "rancherKubernetesEngineConfig",
"kubernetesVersion": "1.15.x",
"authentication": {
"strategy": "x509",
"type": "authnConfig"
},
"network": {
"plugin": "canal",
"type": "networkConfig",
"options": {
"flannel_backend_type": "vxlan"
}
},
"ingress": {
"provider": "nginx",
"type": "ingressConfig"
},
"monitoring": {
"provider": "metrics-server",
"type": "monitoringConfig"
},
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
},
"etcd": {
"creation": "12h",
"extraArgs": {
"heartbeat-interval": 500,
"election-timeout": 5000
},
"retention": "72h",
"snapshot": "false",
"type": "etcdService",
"backupConfig": {
"enabled": "true",
"intervalHours": 12,
"retention": 6,
"type": "backupConfig"
}
}
}
}
return rke_config
def wait_for_cluster_to_be_deleted(client, clusterId, timeout=45):
deleted = False
start = time.time()
interval = 0.5
while not deleted:
if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for cluster to be deleted")
cluster = client.by_id_cluster(clusterId)
if cluster is None:
deleted = True
time.sleep(interval)
interval *= 2
def wait_for_default_revision(client, templateId, revisionId, timeout=60):
updated = False
interval = 0.5
start = time.time()
while not updated:
if time.time() - start > timeout:
raise Exception('Timeout waiting for clustertemplate to update')
template_reloaded = client.by_id_cluster_template(templateId)
if template_reloaded.defaultRevisionId is not None:
updated = True
time.sleep(interval)
interval *= 2
def fail_handler(resource):
return "failed waiting for clustertemplate" + resource + " to get updated"
def wait_for_cluster_create(client, **kwargs):
timeout = DEFAULT_TIMEOUT
interval = 0.5
start = time.time()
while True:
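        # A 404 from create_cluster typically means the caller's RBAC on the
        # template revision has not propagated yet, so keep retrying below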
try:
return client.create_cluster(kwargs)
except ApiError as e:
if e.error.status != 404:
raise e
if time.time() - start > timeout:
exception_msg = 'Timeout waiting for condition.'
raise Exception(exception_msg)
time.sleep(interval)
interval *= 2
def wait_for_clusterTemplate_update_failure(client, revision, timeout=45):
updateWorks = True
start = time.time()
interval = 0.5
cconfig = {
"rancherKubernetesEngineConfig": {
}
}
while updateWorks:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for clustertemplate update failure")
try:
client.update(revision, name=random_str(), clusterConfig=cconfig)
except ApiError as e:
if e.error.status == 422:
updateWorks = False
time.sleep(interval)
interval *= 2
def wait_for_clusterTemplate_list_failure(client, revision, timeout=45):
listWorks = True
start = time.time()
interval = 0.5
while listWorks:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for clustertemplate list failure")
try:
client.by_id_cluster_template_revision(revision.id)
except ApiError as e:
if e.error.status == 403:
listWorks = False
time.sleep(interval)
interval *= 2
rancher | rancher-master/tests/integration/suite/test_persistent_volume.py
from .common import random_str
def test_persistent_volume_update(admin_cc, remove_resource):
client = admin_cc.client
name = random_str()
pv = client.create_persistent_volume(
clusterId="local",
name=name,
accessModes=["ReadWriteOnce"],
capacity={"storage": "10Gi"},
cinder={"readOnly": "false",
"secretRef": {"name": "fss",
"namespace": "fsf"},
"volumeID": "fss",
"fsType": "fss"})
remove_resource(pv)
assert pv is not None
# fields shouldn't get updated
toUpdate = {"readOnly": "true"}
pv = client.update(pv, cinder=toUpdate)
assert (pv["cinder"]["readOnly"]) is False
# persistentVolumeSource type cannot be changed
pv = client.update(pv, azureFile={"readOnly": "true",
"shareName": "abc"}, cinder={})
assert "azureFile" not in pv
rancher | rancher-master/tests/integration/suite/conftest.py
import os
import pytest
import requests
import time
import urllib3
import yaml
import socket
import subprocess
import json
import rancher
from sys import platform
from .common import random_str, wait_for_template_to_be_created
from kubernetes.client import ApiClient, Configuration, CustomObjectsApi, \
ApiextensionsV1beta1Api
from kubernetes.client.rest import ApiException
from kubernetes.config.kube_config import KubeConfigLoader
from rancher import ApiError
from .cluster_common import \
generate_cluster_config, \
create_cluster, \
import_cluster
# This stops ssl warnings for unsecure certs
urllib3.disable_warnings()
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
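        # connect() on a UDP socket sends no packets; it only makes the
        # kernel pick the outbound interface, which getsockname() reports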
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except Exception:
IP = '127.0.0.1'
finally:
s.close()
return IP
IP = get_ip()
SERVER_URL = 'https://' + IP + ':8443'
BASE_URL = SERVER_URL + '/v3'
AUTH_URL = BASE_URL + '-public/localproviders/local?action=login'
DEFAULT_TIMEOUT = 120
DEFAULT_CATALOG = "https://github.com/rancher/integration-test-charts"
WAIT_HTTP_ERROR_CODES = [404, 405]
class ManagementContext:
"""Contains a client that is scoped to the managment plane APIs. That is,
APIs that are not specific to a cluster or project."""
def __init__(self, client, k8s_client=None, user=None):
self.client = client
self.k8s_client = k8s_client
self.user = user
class ClusterContext:
"""Contains a client that is scoped to a specific cluster. Also contains
a reference to the ManagementContext used to create cluster client and
the cluster object itself.
"""
def __init__(self, management, cluster, client):
self.management = management
self.cluster = cluster
self.client = client
class ProjectContext:
"""Contains a client that is scoped to a newly created project. Also
    contains a reference to the clusterContext used to create the project and
the project object itself.
"""
def __init__(self, cluster_context, project, client):
self.cluster = cluster_context
self.project = project
self.client = client
class DINDContext:
"""Returns a DINDContext for a new RKE cluster for the default global
admin user."""
def __init__(
self, name, admin_mc, cluster, client, cluster_file, kube_file
):
self.name = name
self.admin_mc = admin_mc
self.cluster = cluster
self.client = client
self.cluster_file = cluster_file
self.kube_file = kube_file
@pytest.fixture(scope="session")
def admin_mc():
"""Returns a ManagementContext for the default global admin user."""
r = requests.post(AUTH_URL, json={
'username': 'admin',
'password': 'admin',
'responseType': 'json',
}, verify=False)
protect_response(r)
client = rancher.Client(url=BASE_URL, token=r.json()['token'],
verify=False)
k8s_client = kubernetes_api_client(client, 'local')
admin = client.list_user(username='admin').data[0]
return ManagementContext(client, k8s_client, user=admin)
@pytest.fixture
def admin_cc(admin_mc):
"""Returns a ClusterContext for the local cluster for the default global
admin user."""
cluster, client = cluster_and_client('local', admin_mc.client)
return ClusterContext(admin_mc, cluster, client)
def cluster_and_client(cluster_id, mgmt_client):
cluster = mgmt_client.by_id_cluster(cluster_id)
url = cluster.links.self + '/schemas'
client = rancher.Client(url=url,
verify=False,
token=mgmt_client.token)
return cluster, client
def user_project_client(user, project):
"""Returns a project level client for the user"""
return rancher.Client(url=project.links.self+'/schemas', verify=False,
token=user.client.token)
def user_cluster_client(user, cluster):
"""Returns a cluster level client for the user"""
return rancher.Client(url=cluster.links.self+'/schemas', verify=False,
token=user.client.token)
@pytest.fixture
def admin_pc_factory(admin_cc, remove_resource):
"""Returns a ProjectContext for a newly created project in the local
cluster for the default global admin user. The project will be deleted
when this fixture is cleaned up."""
def _admin_pc():
admin = admin_cc.management.client
p = admin.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id)
p = admin.wait_success(p)
wait_for_condition("BackingNamespaceCreated", "True",
admin_cc.management.client, p)
assert p.state == 'active'
remove_resource(p)
p = admin.reload(p)
url = p.links.self + '/schemas'
return ProjectContext(admin_cc, p, rancher.Client(url=url,
verify=False,
token=admin.token))
return _admin_pc
@pytest.fixture
def admin_pc(admin_pc_factory):
return admin_pc_factory()
@pytest.fixture
def admin_system_pc(admin_cc):
    """Returns a ProjectContext for the system project in the local cluster
    for the default global admin user."""
    # request the admin_cc fixture so a real ClusterContext is passed below
    admin = admin_cc.management.client
    plist = admin.list_project(name='System', clusterId='local')
    assert len(plist) == 1
    p = plist.data[0]
    url = p.links.self + '/schemas'
    return ProjectContext(admin_cc, p, rancher.Client(url=url,
                                                      verify=False,
                                                      token=admin.token))
@pytest.fixture
def user_mc(user_factory):
"""Returns a ManagementContext for a newly created standard user"""
return user_factory()
@pytest.fixture
def user_factory(admin_mc, remove_resource):
"""Returns a factory for creating new users which a ManagementContext for
a newly created standard user is returned.
This user and globalRoleBinding will be cleaned up automatically by the
fixture remove_resource.
"""
def _create_user(globalRoleId='user'):
admin = admin_mc.client
username = random_str()
password = random_str()
user = admin.create_user(username=username, password=password)
remove_resource(user)
grb = admin.create_global_role_binding(
userId=user.id, globalRoleId=globalRoleId)
remove_resource(grb)
response = requests.post(AUTH_URL, json={
'username': username,
'password': password,
'responseType': 'json',
}, verify=False)
protect_response(response)
client = rancher.Client(url=BASE_URL, token=response.json()['token'],
verify=False)
return ManagementContext(client, user=user)
return _create_user
@pytest.fixture
def admin_cc_client(admin_cc):
"""Returns the client from the default admin's ClusterContext"""
return admin_cc.client
@pytest.fixture
def admin_pc_client(admin_pc):
"""Returns the client from the default admin's ProjectContext """
return admin_pc.client
@pytest.fixture(scope="session")
def custom_catalog(admin_mc, remove_resource_session):
"""Create a catalog from the URL and cleanup after tests finish"""
def _create_catalog(name=random_str(), catalogURL=DEFAULT_CATALOG):
client = admin_mc.client
catalog = client.create_catalog(name=name,
branch="master",
url=catalogURL,
)
remove_resource_session(catalog)
wait_for_template_to_be_created(client, name)
return _create_catalog
@pytest.fixture()
def restore_rancher_version(request, admin_mc):
client = admin_mc.client
server_version = client.by_id_setting('server-version')
def _restore():
client.update_by_id_setting(
id=server_version.id, value=server_version.value)
request.addfinalizer(_restore)
def set_server_version(client, version):
client.update_by_id_setting(id='server-version', value=version)
def _wait_for_version():
server_version = client.by_id_setting('server-version')
return server_version.value == version
wait_for(_wait_for_version)
@pytest.fixture(scope="session")
def dind_cc(request, admin_mc):
# verify platform is linux
if platform != 'linux':
raise Exception('rke dind only supported on linux')
def set_server_url(url):
admin_mc.client.update_by_id_setting(id='server-url', value=url)
original_url = admin_mc.client.by_id_setting('server-url').value
# make sure server-url is set to IP address for dind accessibility
set_server_url(SERVER_URL)
# revert server url to original when done
request.addfinalizer(lambda: set_server_url(original_url))
# create the cluster & import
name, config, cluster_file, kube_file = generate_cluster_config(request, 1)
create_cluster(cluster_file)
cluster = import_cluster(admin_mc, kube_file, cluster_name=name)
# delete cluster when done
request.addfinalizer(lambda: admin_mc.client.delete(cluster))
# wait for cluster to completely provision
wait_for_condition("Ready", "True", admin_mc.client, cluster, 120)
cluster, client = cluster_and_client(cluster.id, admin_mc.client)
# get ip address of cluster node
node_name = config['nodes'][0]['address']
node_inspect = subprocess.check_output('docker inspect rke-dind-' +
node_name, shell=True).decode()
node_json = json.loads(node_inspect)
node_ip = node_json[0]['NetworkSettings']['IPAddress']
# update cluster fqdn with node ip
admin_mc.client.update_by_id_cluster(
id=cluster.id,
name=cluster.name,
localClusterAuthEndpoint={
'enabled': True,
'fqdn': node_ip + ':6443',
'caCerts': cluster.caCert,
},
)
return DINDContext(
name, admin_mc, cluster, client, cluster_file, kube_file
)
def wait_for(callback, timeout=DEFAULT_TIMEOUT, fail_handler=None):
sleep_time = _sleep_time()
start = time.time()
ret = callback()
while ret is None or ret is False:
time.sleep(next(sleep_time))
if time.time() - start > timeout:
exception_msg = 'Timeout waiting for condition.'
if fail_handler:
exception_msg = exception_msg + ' Fail handler message: ' + \
fail_handler()
raise Exception(exception_msg)
ret = callback()
return ret
def _sleep_time():
sleep = 0.01
while True:
yield sleep
sleep *= 2
if sleep > 1:
sleep = 1
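# Illustrative sketch (assumption): _sleep_time yields 0.01, 0.02, 0.04, ...
# capped at 1 second, so wait_for polls quickly at first and then settles
# into roughly one check per second until the timeout is reached.
def _example_backoff_schedule():
    waits = _sleep_time()
    schedule = [next(waits) for _ in range(9)]
    assert schedule[0] == 0.01
    assert schedule[-1] == 1  # capped after seven doublings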
def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
start = time.time()
sleep = 0.01
while True:
time.sleep(sleep)
sleep *= 2
if sleep > 2:
sleep = 2
try:
obj = client.reload(obj)
except ApiError as e:
if e.error.status != 403:
raise e
else:
return obj
delta = time.time() - start
if delta > timeout:
msg = 'Timeout waiting for [{}:{}] for condition after {}' \
' seconds'.format(obj.type, obj.id, delta)
raise Exception(msg)
@pytest.fixture
def remove_resource(admin_mc, request):
"""Remove a resource after a test finishes even if the test fails."""
client = admin_mc.client
def _cleanup(resource):
def clean():
try:
client.delete(resource)
except ApiError as e:
code = e.error.status
if code == 409 and "namespace will automatically be purged " \
in e.error.message:
pass
elif code not in WAIT_HTTP_ERROR_CODES:
raise e
request.addfinalizer(clean)
return _cleanup
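# Usage sketch (assumption; names are hypothetical): any object created via
# the Rancher API can be handed to the fixture for guaranteed teardown.
def _example_remove_resource(admin_mc, remove_resource):
    user = admin_mc.client.create_user(username=random_str(),
                                       password=random_str())
    remove_resource(user)  # deleted after the test, pass or fail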
@pytest.fixture
def remove_resouce_func(request):
"""Call the delete_func passing in the name of the resource. This is useful
when dealing with the k8s clients for objects that don't exist in the
Rancher client
"""
def _cleanup(delete_func, name):
def clean():
try:
delete_func(name)
except ApiException as e:
body = json.loads(e.body)
if body["code"] not in WAIT_HTTP_ERROR_CODES:
raise e
request.addfinalizer(clean)
return _cleanup
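# Usage sketch (assumption; the namespace example is hypothetical): pair a
# raw kubernetes delete call with the name of the object it should remove.
def _example_remove_namespace(admin_mc, remove_resouce_func):
    from kubernetes.client import CoreV1Api
    core = CoreV1Api(admin_mc.k8s_client)
    ns = core.create_namespace({'metadata': {'name': 'demo-' + random_str()}})
    remove_resouce_func(core.delete_namespace, ns.metadata.name)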
@pytest.fixture
def raw_remove_custom_resource(admin_mc, request):
"""Remove a custom resource, using the k8s client, after a test finishes
even if the test fails. This should only be used if remove_resource, which
exclusively uses the rancher api, cannot be used"""
def _cleanup(resource):
k8s_v1beta1_client = ApiextensionsV1beta1Api(admin_mc.k8s_client)
k8s_client = CustomObjectsApi(admin_mc.k8s_client)
def clean():
kind = resource["kind"]
metadata = resource["metadata"]
api_version = resource["apiVersion"]
api_version_parts = api_version.split("/")
if len(api_version_parts) != 2:
raise ValueError("Error parsing ApiVersion [" + api_version
+ "]." + "Expected form \"group/version\""
)
group = api_version_parts[0]
version = api_version_parts[1]
crd_list = k8s_v1beta1_client.\
list_custom_resource_definition().items
crd = list(filter(lambda x: x.spec.names.kind == kind and
x.spec.group == group and
x.spec.version == version,
crd_list))[0]
try:
k8s_client.delete_namespaced_custom_object(
group,
version,
metadata["namespace"],
crd.spec.names.plural,
metadata["name"])
except ApiException as e:
body = json.loads(e.body)
if body["code"] not in WAIT_HTTP_ERROR_CODES:
raise e
request.addfinalizer(clean)
return _cleanup
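# Shape sketch (assumption): the fixture expects the raw resource dict, with
# kind, apiVersion, and metadata, exactly as CustomObjectsApi returns it.
_EXAMPLE_CUSTOM_RESOURCE = {
    'kind': 'GlobalDNSProvider',
    'apiVersion': 'management.cattle.io/v3',
    'metadata': {'name': 'example', 'namespace': 'cattle-global-data'},
}
# inside a test: raw_remove_custom_resource(_EXAMPLE_CUSTOM_RESOURCE)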
@pytest.fixture(scope="session")
def remove_resource_session(admin_mc, request):
"""Remove a resource after the test session finishes. Can only be used
with fixtures that are 'session' scoped.
"""
client = admin_mc.client
def _cleanup(resource):
def clean():
try:
client.delete(resource)
except ApiError as e:
if e.error.status not in WAIT_HTTP_ERROR_CODES:
raise e
request.addfinalizer(clean)
return _cleanup
@pytest.fixture()
def wait_remove_resource(admin_mc, request, timeout=DEFAULT_TIMEOUT):
"""Remove a resource after a test finishes even if the test fails and
wait until deletion is confirmed."""
client = admin_mc.client
def _cleanup(resource):
def clean():
try:
client.delete(resource)
except ApiError as e:
code = e.error.status
if code == 409 and "namespace will automatically be purged " \
in e.error.message:
pass
elif code not in WAIT_HTTP_ERROR_CODES:
raise e
wait_until(lambda: client.reload(resource) is None)
request.addfinalizer(clean)
return _cleanup
@pytest.fixture()
def list_remove_resource(admin_mc, request):
"""Takes list of resources to remove & supports reordering of the list """
client = admin_mc.client
def _cleanup(resource):
def clean():
for item in resource:
try:
client.delete(item)
except ApiError as e:
if e.error.status not in WAIT_HTTP_ERROR_CODES:
raise e
wait_until(lambda: client.reload(item) is None)
request.addfinalizer(clean)
return _cleanup
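# Usage sketch (assumption; names are hypothetical): deletion walks the list
# in order, so dependants can be placed ahead of what they depend on.
def _example_ordered_cleanup(admin_mc, list_remove_resource):
    remove_list = []
    list_remove_resource(remove_list)
    parent = admin_mc.client.create_project(name='test-' + random_str(),
                                            clusterId='local')
    remove_list.append(parent)
    # a dependant created later can jump the queue:
    # remove_list.insert(0, child)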
def wait_for_condition(condition_type, status, client, obj, timeout=45):
start = time.time()
obj = client.reload(obj)
sleep = 0.01
while not find_condition(condition_type, status, obj):
time.sleep(sleep)
sleep *= 2
if sleep > 2:
sleep = 2
obj = client.reload(obj)
delta = time.time() - start
if delta > timeout:
msg = 'Expected condition {} to have status {}\n'\
'Timeout waiting for [{}:{}] for condition after {} ' \
'seconds\n {}'.format(condition_type, status, obj.type, obj.id,
delta, str(obj))
raise Exception(msg)
return obj
def wait_until(cb, timeout=DEFAULT_TIMEOUT, backoff=True):
start_time = time.time()
interval = 1
while time.time() < start_time + timeout and cb() is False:
if backoff:
interval *= 2
time.sleep(interval)
def find_condition(condition_type, status, obj):
if not hasattr(obj, "conditions"):
return False
if obj.conditions is None:
return False
for condition in obj.conditions:
if condition.type == condition_type and condition.status == status:
return True
return False
def kubernetes_api_client(rancher_client, cluster_name):
c = rancher_client.by_id_cluster(cluster_name)
kc = c.generateKubeconfig()
loader = KubeConfigLoader(config_dict=yaml.full_load(kc.config))
client_configuration = type.__call__(Configuration)
loader.load_and_set(client_configuration)
k8s_client = ApiClient(configuration=client_configuration)
return k8s_client
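# Usage sketch (assumption: 'cattle-system' exists on the local cluster):
# the returned ApiClient plugs into any typed kubernetes client.
def _example_list_namespaces(admin_mc):
    from kubernetes.client import CoreV1Api
    k8s = kubernetes_api_client(admin_mc.client, 'local')
    names = [n.metadata.name for n in CoreV1Api(k8s).list_namespace().items]
    assert 'cattle-system' in names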
def protect_response(r):
if r.status_code >= 300:
        message = f'Server responded with {r.status_code}\nbody:\n{r.text}'
raise ValueError(message)
def create_kubeconfig(request, dind_cc, client):
# request cluster scoped kubeconfig, permissions may not be synced yet
def generateKubeconfig(max_attempts=5):
for attempt in range(1, max_attempts+1):
try:
# get cluster for client
cluster = client.by_id_cluster(dind_cc.cluster.id)
return cluster.generateKubeconfig()['config']
except ApiError as err:
if attempt == max_attempts:
raise err
time.sleep(1)
cluster_kubeconfig = generateKubeconfig()
# write cluster scoped kubeconfig
cluster_kubeconfig_file = "kubeconfig-" + random_str() + ".yml"
f = open(cluster_kubeconfig_file, "w")
f.write(cluster_kubeconfig)
f.close()
# cleanup file when done
request.addfinalizer(lambda: os.remove(cluster_kubeconfig_file))
# extract token name
config = yaml.safe_load(cluster_kubeconfig)
token_name = config['users'][0]['user']['token'].split(':')[0]
# wait for token to sync
crd_client = CustomObjectsApi(
kubernetes_api_client(
dind_cc.admin_mc.client,
dind_cc.cluster.id
)
)
def cluster_token_available():
try:
return crd_client.get_namespaced_custom_object(
'cluster.cattle.io',
'v3',
'cattle-system',
'clusterauthtokens',
token_name
)
except ApiException:
return None
wait_for(cluster_token_available)
return cluster_kubeconfig_file
def set_cluster_psp(admin_mc, value):
"""Enable or Disable the pod security policy at the local cluster"""
k8s_dynamic_client = CustomObjectsApi(admin_mc.k8s_client)
    # this mocks pspEnabled via the cluster annotation; it does not create
    # real pod security policies
def update_cluster():
try:
local_cluster = k8s_dynamic_client.get_cluster_custom_object(
"management.cattle.io", "v3", "clusters", "local")
local_cluster["metadata"]["annotations"][
"capabilities/pspEnabled"] = value
k8s_dynamic_client.replace_cluster_custom_object(
"management.cattle.io", "v3", "clusters", "local",
local_cluster)
except ApiException as e:
assert e.status == 409
return False
return True
wait_for(update_cluster)
def check_psp():
cluster_obj = admin_mc.client.by_id_cluster(id="local")
return str(cluster_obj.capabilities.pspEnabled).lower() == value
wait_for(check_psp)
@pytest.fixture()
def restore_cluster_psp(admin_mc, request):
cluster_obj = admin_mc.client.by_id_cluster(id="local")
value = str(cluster_obj.capabilities.pspEnabled).lower()
def _restore():
set_cluster_psp(admin_mc, value)
request.addfinalizer(_restore)
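# Usage sketch (assumption; hypothetical test): flip PSP support for the
# body of a test and let restore_cluster_psp put the old value back.
def _example_toggle_psp(admin_mc, restore_cluster_psp):
    set_cluster_psp(admin_mc, "true")   # enable for the test body
    # ... exercise PSP-dependent behaviour here ...
    set_cluster_psp(admin_mc, "false")  # optional; the fixture restores anyway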
| 21,251 | 31.545176 | 79 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_pipeline.py
|
import pytest
import time
from rancher import ApiError
from .pipeline_common import MockGithub
from .conftest import ProjectContext, rancher, \
wait_until_available, user_project_client
from .common import random_str
MOCK_GITHUB_PORT = 4016
MOCK_GITHUB_HOST = "localhost:4016"
MOCK_GITHUB_REPO_URL = 'https://github.com/octocat/Hello-World.git'
MOCK_GITHUB_USER = 'octocat'
GITHUB_TYPE = 'github'
@pytest.fixture(scope="module")
def mock_github():
server = MockGithub(port=MOCK_GITHUB_PORT)
server.start()
yield server
server.shutdown_server()
@pytest.mark.nonparallel
def test_pipeline_set_up_github(admin_pc, mock_github):
client = admin_pc.client
set_up_pipeline_github(admin_pc)
configs = client.list_source_code_provider_config()
gh = None
for c in configs:
if c.type == "githubPipelineConfig":
gh = c
assert gh is not None
assert gh.enabled is True
assert gh.disable
providers = client.list_source_code_provider()
assert len(providers) == 1
gh_provider = providers.data[0]
assert gh_provider.type == 'githubProvider'
assert gh_provider.login
creds = client.list_source_code_credential()
assert len(creds) == 1
assert creds.data[0].sourceCodeType == GITHUB_TYPE
assert creds.data[0].loginName == MOCK_GITHUB_USER
repos = client.list_source_code_repository()
assert len(repos) == 1
assert repos.data[0].sourceCodeType == GITHUB_TYPE
assert repos.data[0].url == MOCK_GITHUB_REPO_URL
@pytest.mark.nonparallel
def test_pipeline_set_up_github_with_custom_role(admin_mc,
admin_pc,
mock_github,
user_factory,
remove_resource):
# Create a new user with custom global role
user = user_factory(globalRoleId="user-base")
remove_resource(user)
# Preference creation triggers user ns creation
user.client.create_preference(name="language", value="\"en-us\"")
client = admin_mc.client
project = admin_pc.project
# Add this user as project-owner
prtb_owner = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId="project-owner",
userId=user.user.id)
remove_resource(prtb_owner)
url = project.links.self + '/schemas'
user_pc = ProjectContext(None, project,
rancher.Client(url=url,
verify=False,
token=user.client.token))
set_up_pipeline_github(user_pc)
user_client = user_pc.client
creds = user_client.list_source_code_credential()
assert len(creds) == 1
assert creds.data[0].sourceCodeType == GITHUB_TYPE
assert creds.data[0].loginName == MOCK_GITHUB_USER
repos = user_client.list_source_code_repository()
assert len(repos) == 1
assert repos.data[0].sourceCodeType == GITHUB_TYPE
assert repos.data[0].url == MOCK_GITHUB_REPO_URL
@pytest.mark.nonparallel
def test_pipeline_disable_github(admin_pc, mock_github):
client = admin_pc.client
set_up_pipeline_github(admin_pc)
configs = client.list_source_code_provider_config()
gh = None
for c in configs:
if c.type == "githubPipelineConfig":
gh = c
assert gh is not None
assert gh.enabled is True
assert gh.disable
gh.disable()
providers = client.list_source_code_provider()
assert len(providers) == 0
@pytest.mark.nonparallel
def test_pipeline_github_log_in_out(admin_pc, mock_github):
client = admin_pc.client
set_up_pipeline_github(admin_pc)
providers = client.list_source_code_provider()
gh_provider = providers.data[0]
creds = client.list_source_code_credential()
creds.data[0].refreshrepos()
repos = client.list_source_code_repository()
assert len(repos) == 1
repos_by_cred = creds.data[0].repos()
assert len(repos_by_cred) == 1
creds.data[0].logout_action()
creds = client.list_source_code_credential()
assert len(creds) == 0
gh_provider.login(code='test_code')
creds = client.list_source_code_credential()
assert len(creds) == 1
def test_pipeline_run_access(admin_mc, admin_pc, user_mc, remove_resource):
"""Tests that a user with read-only access is not
able to run a pipeline.
"""
prtb = admin_mc.client.create_project_role_template_binding(
name="prtb-" + random_str(),
userId=user_mc.user.id,
projectId=admin_pc.project.id,
roleTemplateId="read-only")
remove_resource(prtb)
pipeline = admin_pc.client.create_pipeline(
projectId=admin_pc.project.id,
repositoryUrl="https://github.com/rancher/pipeline-example-go.git",
name=random_str(),
)
remove_resource(pipeline)
wait_until_available(admin_pc.client, pipeline)
# ensure user can get pipeline
proj_user_client = user_project_client(user_mc, admin_pc.project)
wait_until_available(proj_user_client, pipeline)
with pytest.raises(ApiError) as e:
# Doing run action with pipeline obj from admin_client should fail
user_mc.client.action(obj=pipeline, action_name="run", branch="master")
assert e.value.error.status == 404
def set_up_pipeline_github(user_pc):
gh = get_source_code_provider_config(user_pc, "githubPipelineConfig")
assert gh is not None
gh.testAndApply(code="test_code",
hostname=MOCK_GITHUB_HOST,
tls=False,
clientId="test_id",
clientSecret="test_secret")
def get_source_code_provider_config(user_pc, config_type):
client = user_pc.client
start_time = int(time.time())
while int(time.time()) - start_time < 30:
configs = client.list_source_code_provider_config()
for c in configs:
if c.type == config_type:
return c
time.sleep(3)
raise Exception('Timeout getting {0}'.format(config_type))
| 6,127 | 31.08377 | 79 |
py
|
rancher
|
rancher-master/tests/integration/suite/alert_common.py
|
import requests
from flask import request
from threading import Thread
class MockServer(Thread):
def __init__(self, port=5000):
super().__init__()
from flask import Flask
self.port = port
self.app = Flask(__name__)
self.url = "http://127.0.0.1:%s" % self.port
self.app.add_url_rule("/shutdown", view_func=self._shutdown_server)
def _shutdown_server(self):
if 'werkzeug.server.shutdown' not in request.environ:
raise RuntimeError('Not running the development server')
request.environ['werkzeug.server.shutdown']()
return 'Server shutting down...'
def shutdown_server(self):
requests.get("http://127.0.0.1:%s/shutdown" % self.port,
headers={'Connection': 'close'})
self.join()
def run(self):
self.app.run(host='0.0.0.0', port=self.port, threaded=True)
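# Usage sketch (assumption; the endpoint below is hypothetical): subclasses
# register their routes before the thread starts serving.
class _ExampleMock(MockServer):
    def __init__(self, port):
        super().__init__(port)
        self.app.add_url_rule("/ping", view_func=self._ping)
    def _ping(self):
        return "pong"
# lifecycle: server = _ExampleMock(port=5001); server.start(); ...
# server.shutdown_server()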
class MockReceiveAlert(MockServer):
def api_microsoft_teams(self):
message = request.json.get("text")
assert message == MICROSOFTTEAMS_MESSAGE
return "success"
def api_dingtalk(self, url):
message = request.json.get("text")
assert message.get('content') == DINGTALK_MESSAGE
return '{"errcode":0,"errmsg":""}'
def add_endpoints(self):
self.app.add_url_rule("/microsoftTeams",
view_func=self.api_microsoft_teams,
methods=('POST',))
self.app.add_url_rule("/dingtalk/<path:url>/",
view_func=self.api_dingtalk,
methods=('POST',))
def __init__(self, port):
super().__init__(port)
self.add_endpoints()
DINGTALK_MESSAGE = "Dingtalk setting validated"
MICROSOFTTEAMS_MESSAGE = "MicrosoftTeams setting validated"
| 1,877 | 29.290323 | 75 |
py
|
rancher
|
rancher-master/tests/integration/suite/cluster_common.py
|
import subprocess
import os
import re
import yaml
from .common import random_str
from jinja2 import Template
def generate_cluster_config(request, dind_rke_node_num):
    # generate random names for the cluster and its config/kube_config files
dind_name = 'dind-' + random_str()
dind_cluster_config_file = dind_name + '.yml'
dind_kube_config_file = 'kube_config_' + dind_name + '.yml'
cluster_config_tmpl = get_rke_config_template()
# generate nodes
random_nodes = [
'node-' +
random_str()
for x in range(dind_rke_node_num)]
rke_config_template = Template(cluster_config_tmpl)
rendered_tmpl = rke_config_template.render(
random_nodes=random_nodes)
# write config file on disk
cluster_config_file = open(dind_cluster_config_file, 'w')
cluster_config_file.write(rendered_tmpl)
cluster_config_file.close()
request.addfinalizer(lambda: cleanup_dind(
dind_cluster_config_file,
dind_name + '.rkestate'
))
return \
dind_name, \
yaml.safe_load(rendered_tmpl), \
dind_cluster_config_file, \
dind_kube_config_file
def cleanup_dind(cluster_file, state_file):
remove_cluster(cluster_file)
os.remove(cluster_file)
os.remove(state_file)
def get_rke_config_template():
dind_cluster_config_j2 = """
---
authentication:
strategy: "x509|webhook"
nodes:{% for node in random_nodes %}
- address: {{ node }}
user: docker
role:
- controlplane
- worker
- etcd{% endfor %}
"""
return dind_cluster_config_j2
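# Rendered sketch for a single node (assumption: the node name below is
# illustrative of what 'node-' + random_str() produces):
#
# ---
# authentication:
#   strategy: "x509|webhook"
# nodes:
# - address: node-abc123
#   user: docker
#   role:
#   - controlplane
#   - worker
#   - etcd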
def create_cluster(cluster_config_file):
    # cluster creation is currently disabled pending a refactor; everything
    # below this guard is unreachable until it is removed
    raise Exception('cluster creation needs refactor')
    # attempt to resolve unknown random rke up errors
for _ in range(3):
try:
return subprocess.check_output(
'rke up --dind --config ' +
cluster_config_file,
stderr=subprocess.STDOUT, shell=True
)
except subprocess.CalledProcessError as err:
print('RKE up error: ' + str(err.output))
raise Exception('rke up failure')
def remove_cluster(cluster_config_file):
try:
return subprocess.check_output(
'rke remove --force --dind --config ' +
cluster_config_file,
stderr=subprocess.STDOUT, shell=True
)
except subprocess.CalledProcessError as err:
print('RKE down error: ' + str(err.output))
raise err
def import_cluster(admin_mc, kube_config_file, cluster_name):
client = admin_mc.client
imported_cluster = client.create_cluster(
replace=True,
name=cluster_name,
localClusterAuthEndpoint={
'enabled': True,
},
rancherKubernetesEngineConfig={},
)
reg_token = client.create_cluster_registration_token(
clusterId=imported_cluster.id
)
# modify import command to add auth image
match = r'\.yaml \|'
replace = '.yaml?authImage=fixed |'
insecure_command = re.sub(match, replace, reg_token.insecureCommand)
# run kubectl command
os_env = os.environ.copy()
os_env['KUBECONFIG'] = kube_config_file
subprocess.check_output(insecure_command, env=os_env, shell=True)
return imported_cluster
| 3,399 | 28.824561 | 72 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_cluster_scan.py
|
from .common import random_str
from .conftest import cluster_and_client
def test_run_scan_not_available_on_not_ready_cluster(admin_mc,
remove_resource):
client = admin_mc.client
cluster = client.create_cluster(
name=random_str(),
rancherKubernetesEngineConfig={
"accessKey": "junk"
}
)
remove_resource(cluster)
    # reload the cluster so its actions list is current
    cluster, cluster_client = cluster_and_client(cluster.id, client)
assert 'runSecurityScan' not in cluster.actions
| 533 | 30.411765 | 70 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_cluster_role_template_bindings.py
|
import pytest
from .common import random_str
from .conftest import wait_for
from rancher import ApiError
def test_cannot_target_users_and_group(admin_mc, remove_resource):
"""Asserts that a clusterroletemplatebinding cannot target both
user and group subjects"""
admin_client = admin_mc.client
with pytest.raises(ApiError) as e:
crtb = admin_client.create_cluster_role_template_binding(
name="crtb-"+random_str(),
clusterId="local",
userId=admin_mc.user.id,
groupPrincipalId="someauthprovidergroupid",
roleTemplateId="clustercatalogs-view")
remove_resource(crtb)
assert e.value.error.status == 422
assert "must target a user [userId]/[userPrincipalId] OR a group " \
"[groupId]/[groupPrincipalId]" in e.value.error.message
def test_must_have_target(admin_mc, remove_resource):
"""Asserts that a clusterroletemplatebinding must have a subject"""
admin_client = admin_mc.client
with pytest.raises(ApiError) as e:
crtb = admin_client.create_cluster_role_template_binding(
name="crtb-" + random_str(),
clusterId="local",
roleTemplateId="clustercatalogs-view")
remove_resource(crtb)
assert e.value.error.status == 422
assert "must target a user [userId]/[userPrincipalId] OR a group " \
"[groupId]/[groupPrincipalId]" in e.value.error.message
def test_cannot_update_subjects_or_cluster(admin_mc, remove_resource):
"""Asserts non-metadata fields cannot be updated"""
admin_client = admin_mc.client
old_crtb = admin_client.create_cluster_role_template_binding(
name="crtb-" + random_str(),
clusterId="local",
userId=admin_mc.user.id,
roleTemplateId="clustercatalogs-view")
remove_resource(old_crtb)
wait_for(lambda: admin_client.reload(old_crtb).userPrincipalId is not None)
old_crtb = admin_client.reload(old_crtb)
crtb = admin_client.update_by_id_cluster_role_template_binding(
id=old_crtb.id,
clusterId="fakecluster",
userId="",
userPrincipalId="asdf",
groupPrincipalId="asdf",
group="asdf"
)
assert crtb.get("clusterId") == old_crtb.get("clusterId")
assert crtb.get("userId") == old_crtb.get("userId")
assert crtb.get("userPrincipalId") == old_crtb.get("userPrincipalId")
assert crtb.get("groupPrincipalId") == old_crtb.get("groupPrincipalId")
assert crtb.get("group") == old_crtb.get("group")
| 2,526 | 36.161765 | 79 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_kontainer_drivers.py
|
import platform
import pytest
import sys
import requests
from rancher import ApiError
from .conftest import wait_for_condition, wait_until, random_str, \
wait_for, BASE_URL
NEW_DRIVER_URL = "https://github.com/rancher/kontainer-engine-driver-" \
"example/releases/download/v0.2.2/kontainer-engine-" \
"driver-example-" + sys.platform + "-amd64"
NEW_DRIVER_ARM64_URL = "https://github.com/rancher/kontainer-engine-driver-" \
"example/releases/download/v0.2.2/kontainer-engine-" \
"driver-example-" + sys.platform + "-arm64"
DRIVER_AMD64_URL = "https://github.com/rancher/" \
"kontainer-engine-driver-example/" \
"releases/download/v0.2.1/kontainer-engine-driver-example-"\
+ sys.platform
DRIVER_ARM64_URL = "https://github.com/jianghang8421/" \
"kontainer-engine-driver-example/" \
"releases/download/v0.2.1-multiarch/" \
"kontainer-engine-driver-example-" \
+ sys.platform + "-arm64"
def test_builtin_drivers_are_present(admin_mc):
"""Test if builtin kd are present and cannot be deleted via API or UI"""
admin_mc.client.reload_schema()
types = admin_mc.client.schema.types
for name in ['azureKubernetesService',
'googleKubernetesEngine',
'amazonElasticContainerService']:
kd = admin_mc.client.list_kontainerDriver(
name=name,
).data[0]
wait_for_condition('Active', "True", admin_mc.client, kd, timeout=90)
# check in schema
assert name + "Config" in types
# verify has no delete link because its built in
kd = admin_mc.client.by_id_kontainer_driver(name.lower())
assert not hasattr(kd.links, 'remove')
# assert cannot delete it via API
with pytest.raises(ApiError) as e:
admin_mc.client.delete(kd)
assert e.value.error.status == 405
@pytest.mark.skip
@pytest.mark.nonparallel
def test_kontainer_driver_lifecycle(admin_mc, list_remove_resource):
URL = DRIVER_AMD64_URL
if platform.machine() == "aarch64":
URL = DRIVER_ARM64_URL
kd = admin_mc.client.create_kontainerDriver(
createDynamicSchema=True,
active=True,
url=URL
)
remove_list = [kd]
list_remove_resource(remove_list)
# Test that it is in downloading state while downloading
kd = wait_for_condition('Downloaded', 'Unknown', admin_mc.client, kd)
assert "downloading" == kd.state
# no actions should be present while downloading/installing
assert not hasattr(kd, 'actions')
# test driver goes active and appears in schema
kd = wait_for_condition('Active', 'True', admin_mc.client, kd,
timeout=90)
verify_driver_in_types(admin_mc.client, kd)
# verify the leading kontainer driver identifier and trailing system
# type are removed from the name
assert kd.name == "example"
# verify the kontainer driver has activate and no deactivate links
assert not hasattr(kd.actions, "activate")
assert hasattr(kd.actions, "deactivate")
assert kd.actions.deactivate != ""
# verify driver has delete link
assert kd.links.remove != ""
# associate driver with a cluster
cluster = admin_mc.client.create_cluster(
name=random_str(),
exampleEngineConfig={
"credentials": "bad credentials",
"nodeCount": 3
})
# order matters here, need to remove cluster before kontainer driver
remove_list.insert(0, cluster)
def check_remove_link(kod):
kod = admin_mc.client.reload(kod)
if hasattr(kod.links, "remove"):
return False
return True
wait_for(lambda: check_remove_link(kd))
with pytest.raises(ApiError) as e:
admin_mc.client.delete(kd)
assert e.value.error.status == 405
# cleanup local cluster, note this depends on a force delete of the cluster
# within rancher since this cluster is not a "true" cluster
def cluster_steady_state(clus):
clus = admin_mc.client.reload(clus)
if "lifecycle.cattle.io/" \
"create.mgmt-cluster-rbac-remove" in clus.annotations:
return True
return False
# this typically takes at least 45 seconds
wait_for(lambda: cluster_steady_state(cluster), timeout=90)
admin_mc.client.delete(cluster)
# wait for removal link to return
wait_for(lambda: not (check_remove_link(kd)), timeout=90)
admin_mc.client.delete(kd)
# test driver is removed from schema after deletion
verify_driver_not_in_types(admin_mc.client, kd)
@pytest.mark.skip
@pytest.mark.nonparallel
def test_enabling_driver_exposes_schema(admin_mc, wait_remove_resource):
""" Test if enabling driver exposes its dynamic schema, drivers are
downloaded / installed once they are active, and if re-activating a
driver exposes its schema again"""
URL = DRIVER_AMD64_URL
if platform.machine() == "aarch64":
URL = DRIVER_ARM64_URL
kd = admin_mc.client.create_kontainerDriver(
createDynamicSchema=True,
active=False,
url=URL
)
wait_remove_resource(kd)
kd = wait_for_condition('Inactive', 'True', admin_mc.client, kd,
timeout=90)
# verify the kontainer driver has no activate and a deactivate link
assert hasattr(kd.actions, "activate")
assert kd.actions.activate != ""
assert not hasattr(kd.actions, "deactivate")
verify_driver_not_in_types(admin_mc.client, kd)
kd.active = True # driver should begin downloading / installing
admin_mc.client.update_by_id_kontainerDriver(kd.id, kd)
kd = wait_for_condition('Active', 'True', admin_mc.client, kd,
timeout=90)
verify_driver_in_types(admin_mc.client, kd)
kd.active = False
admin_mc.client.update_by_id_kontainerDriver(kd.id, kd)
verify_driver_not_in_types(admin_mc.client, kd)
# test re-activation flow
kd.active = True
admin_mc.client.update_by_id_kontainerDriver(kd.id, kd)
verify_driver_in_types(admin_mc.client, kd)
@pytest.mark.skip
@pytest.mark.nonparallel
def test_upgrade_changes_schema(admin_mc, wait_remove_resource):
client = admin_mc.client
URL = DRIVER_AMD64_URL
if platform.machine() == "aarch64":
URL = DRIVER_ARM64_URL
kd = client.create_kontainerDriver(
createDynamicSchema=True,
active=True,
url=URL
)
wait_remove_resource(kd)
kd = wait_for_condition('Active', 'True', admin_mc.client, kd,
timeout=90)
verify_driver_in_types(client, kd)
kdSchema = client.schema.types[kd.name + 'EngineConfig']
assert 'specialTestingField' not in kdSchema.resourceFields
NEW_URL = NEW_DRIVER_URL
if platform.machine() == "aarch64":
NEW_URL = NEW_DRIVER_ARM64_URL
kd.url = NEW_URL
kd = client.update_by_id_kontainerDriver(kd.id, kd)
def schema_updated():
client.reload_schema()
kdSchema = client.schema.types[kd.name + 'EngineConfig']
return 'specialTestingField' in kdSchema.resourceFields
wait_until(schema_updated)
kdSchema = client.schema.types[kd.name + 'EngineConfig']
assert 'specialTestingField' in kdSchema.resourceFields
@pytest.mark.skip
@pytest.mark.nonparallel
def test_create_duplicate_driver_conflict(admin_mc, wait_remove_resource):
""" Test if adding a driver with a pre-existing driver's URL
returns a conflict error"""
URL = DRIVER_AMD64_URL
if platform.machine() == "aarch64":
URL = DRIVER_ARM64_URL
kd = admin_mc.client.create_kontainerDriver(
createDynamicSchema=True,
active=True,
url=URL
)
wait_remove_resource(kd)
kd = wait_for_condition('Active', 'True', admin_mc.client, kd, timeout=90)
try:
kd2 = admin_mc.client.create_kontainerDriver(
createDynamicSchema=True,
active=True,
url=URL
)
wait_remove_resource(kd2)
pytest.fail("Failed to catch duplicate driver URL on create")
except ApiError as e:
assert e.error.status == 409
assert "Driver URL already in use:" in e.error.message
@pytest.mark.skip
@pytest.mark.nonparallel
def test_update_duplicate_driver_conflict(admin_mc, wait_remove_resource):
""" Test if updating a driver's URL to a pre-existing driver's URL
returns a conflict error"""
URL = DRIVER_AMD64_URL
if platform.machine() == "aarch64":
URL = DRIVER_ARM64_URL
kd1 = admin_mc.client.create_kontainerDriver(
createDynamicSchema=True,
active=True,
url=URL
)
wait_remove_resource(kd1)
kd1 = wait_for_condition('Active', 'True', admin_mc.client, kd1,
timeout=90)
kd2 = admin_mc.client.create_kontainerDriver(
createDynamicSchema=True,
active=True,
url=URL + "2"
)
wait_remove_resource(kd2)
kd2.url = URL
try:
admin_mc.client.update_by_id_kontainerDriver(kd2.id, kd2)
pytest.fail("Failed to catch duplicate driver URL on update")
except ApiError as e:
assert e.error.status == 409
assert "Driver URL already in use:" in e.error.message
def test_kontainer_driver_links(admin_mc):
client = admin_mc.client
lister = client.list_kontainerDriver()
assert 'rancher-images' in lister.links
assert 'rancher-windows-images' in lister.links
token = 'Bearer '+client.token
url = BASE_URL + "/kontainerdrivers/rancher-images"
images = get_images(url, token)
assert "hyperkube" in images
assert "rke-tools" in images
assert "kubelet-pause" not in images
# test windows link
url = BASE_URL + "/kontainerdrivers/rancher-windows-images"
images = get_images(url, token)
assert "hyperkube" in images
assert "rke-tools" in images
assert "kubelet-pause" in images
def get_images(url, token):
data = requests.get(
url=url,
verify=False,
headers={'Accept': '*/*', 'Authorization': token})
assert data is not None
content = data.content.splitlines()
assert len(content) > 0
test = {}
for line in content:
if "rancher/hyperkube" in str(line):
test["hyperkube"] = True
elif "rancher/rke-tools" in str(line):
test["rke-tools"] = True
elif "rancher/kubelet-pause" in str(line):
test["kubelet-pause"] = True
return test
def verify_driver_in_types(client, kd):
def check():
client.reload_schema()
types = client.schema.types
return kd.name + 'EngineConfig' in types
wait_until(check)
client.reload_schema()
assert kd.name + 'EngineConfig' in client.schema.types
def verify_driver_not_in_types(client, kd):
def check():
client.reload_schema()
types = client.schema.types
return kd.name + 'EngineConfig' not in types
wait_until(check)
client.reload_schema()
assert kd.name + 'EngineConfig' not in client.schema.types
@pytest.mark.nonparallel
def test_user_update_settings(admin_mc):
client = admin_mc.client
k8s_version_setting = client.by_id_setting('k8s-version')
default_k8s_version = k8s_version_setting["default"]
k8s_versions_curr = client.by_id_setting(
'k8s-versions-current')["value"].split(",")
# user updates correct value
user_value = k8s_versions_curr[0]
updated_version = admin_mc.client.update_by_id_setting(
id='k8s-version', value=user_value)
assert updated_version["default"] == default_k8s_version
assert updated_version["value"] == user_value
assert updated_version["labels"]["io.cattle.user.updated"] == "true"
# assert refresh action doesn't override
lister = client.list_kontainerDriver()
try:
client.action(obj=lister, action_name="refresh")
except ApiError as e:
        assert e.error.status == 422  # e is the ApiError itself here
new_k8s_version = client.by_id_setting('k8s-version')
assert new_k8s_version["default"] == default_k8s_version
assert new_k8s_version["value"] == user_value
# user updates invalid value
user_value = "v1.15.4-rancher13"
try:
updated_version = admin_mc.client.update_by_id_setting(
id='k8s-version', value=user_value)
except ApiError as e:
assert e.error.code == "MissingRequired"
assert e.error.status == 422
# bring back the default value, user updates with empty value
user_value = ""
updated_version = admin_mc.client.update_by_id_setting(
id='k8s-version', value=user_value)
assert updated_version["default"] == default_k8s_version
assert updated_version["value"] == default_k8s_version
assert updated_version["labels"]["io.cattle.user.updated"] == "false"
| 13,034 | 33.302632 | 79 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_globaldns.py
|
from .common import random_str
from rancher import ApiError
from kubernetes.client import CustomObjectsApi
from kubernetes.client import CoreV1Api
import pytest
import time
import kubernetes
import base64
def test_dns_fqdn_unique(admin_mc):
client = admin_mc.client
provider_name = random_str()
access = random_str()
secret = random_str()
globaldns_provider = \
client.create_global_dns_provider(
name=provider_name,
rootDomain="example.com",
route53ProviderConfig={
'accessKey': access,
'secretKey': secret})
fqdn = random_str() + ".example.com"
globaldns_entry = \
client.create_global_dns(fqdn=fqdn, providerId=provider_name)
with pytest.raises(ApiError) as e:
client.create_global_dns(fqdn=fqdn, providerId=provider_name)
assert e.value.error.status == 422
client.delete(globaldns_entry)
client.delete(globaldns_provider)
def test_dns_provider_deletion(admin_mc):
client = admin_mc.client
provider_name = random_str()
access = random_str()
secret = random_str()
globaldns_provider = \
client.create_global_dns_provider(
name=provider_name,
rootDomain="example.com",
route53ProviderConfig={
'accessKey': access,
'secretKey': secret})
fqdn = random_str() + ".example.com"
provider_id = "cattle-global-data:"+provider_name
globaldns_entry = \
client.create_global_dns(fqdn=fqdn, providerId=provider_id)
with pytest.raises(ApiError) as e:
client.delete(globaldns_provider)
assert e.value.error.status == 403
client.delete(globaldns_entry)
client.delete(globaldns_provider)
def test_share_globaldns_provider_entry(admin_mc, user_factory,
remove_resource):
client = admin_mc.client
provider_name = random_str()
access = random_str()
secret = random_str()
# Add regular user as member to gdns provider
user_member = user_factory()
remove_resource(user_member)
user_client = user_member.client
members = [{"userPrincipalId": "local://" + user_member.user.id,
"accessType": "owner"}]
globaldns_provider = \
client.create_global_dns_provider(
name=provider_name,
rootDomain="example.com",
route53ProviderConfig={
'accessKey': access,
'secretKey': secret},
members=members)
remove_resource(globaldns_provider)
fqdn = random_str() + ".example.com"
globaldns_entry = \
client.create_global_dns(fqdn=fqdn, providerId=provider_name,
members=members)
remove_resource(globaldns_entry)
# Make sure creator can access both, provider and entry
gdns_provider_id = "cattle-global-data:" + provider_name
gdns_provider = client.by_id_global_dns_provider(gdns_provider_id)
assert gdns_provider is not None
gdns_entry_id = "cattle-global-data:" + globaldns_entry.name
gdns = client.by_id_global_dns(gdns_entry_id)
assert gdns is not None
# user should be able to list this gdns provider
api_instance = kubernetes.client.RbacAuthorizationV1Api(
admin_mc.k8s_client)
provider_rb_name = provider_name + "-gp-a"
wait_to_ensure_user_in_rb_subject(api_instance, provider_rb_name,
user_member.user.id)
gdns_provider = user_client.by_id_global_dns_provider(gdns_provider_id)
assert gdns_provider is not None
# user should be able to list this gdns entry
entry_rb_name = globaldns_entry.name + "-g-a"
wait_to_ensure_user_in_rb_subject(api_instance, entry_rb_name,
user_member.user.id)
gdns = user_client.by_id_global_dns(gdns_entry_id)
assert gdns is not None
def test_user_access_global_dns(admin_mc, user_factory, remove_resource):
user1 = user_factory()
remove_resource(user1)
user_client = user1.client
provider_name = random_str()
access = random_str()
secret = random_str()
globaldns_provider = \
user_client.create_global_dns_provider(
name=provider_name,
rootDomain="example.com",
route53ProviderConfig={
'accessKey': access,
'secretKey': secret})
remove_resource(globaldns_provider)
fqdn = random_str() + ".example.com"
globaldns_entry = \
user_client.create_global_dns(fqdn=fqdn, providerId=provider_name)
remove_resource(globaldns_entry)
# Make sure creator can access both, provider and entry
api_instance = kubernetes.client.RbacAuthorizationV1Api(
admin_mc.k8s_client)
provider_rb_name = provider_name + "-gp-a"
wait_to_ensure_user_in_rb_subject(api_instance, provider_rb_name,
user1.user.id)
gdns_provider_id = "cattle-global-data:" + provider_name
gdns_provider = user_client.by_id_global_dns_provider(gdns_provider_id)
assert gdns_provider is not None
entry_rb_name = globaldns_entry.name + "-g-a"
wait_to_ensure_user_in_rb_subject(api_instance, entry_rb_name,
user1.user.id)
gdns_entry_id = "cattle-global-data:" + globaldns_entry.name
gdns = user_client.by_id_global_dns(gdns_entry_id)
assert gdns is not None
def test_update_gdns_entry(admin_mc, remove_resource):
client = admin_mc.client
provider_name = random_str()
access = random_str()
secret = random_str()
globaldns_provider = \
client.create_global_dns_provider(
name=provider_name,
rootDomain="example.com",
route53ProviderConfig={
'accessKey': access,
'secretKey': secret})
remove_resource(globaldns_provider)
fqdn = random_str() + ".example.com"
gdns_entry_name = random_str()
globaldns_entry = \
client.create_global_dns(name=gdns_entry_name,
fqdn=fqdn, providerId=provider_name)
remove_resource(globaldns_entry)
new_fqdn = random_str()
wait_for_gdns_entry_creation(admin_mc, gdns_entry_name)
client.update(globaldns_entry, fqdn=new_fqdn)
wait_for_gdns_update(admin_mc, gdns_entry_name, new_fqdn)
def test_create_globaldns_provider_regular_user(remove_resource,
user_factory):
provider_name = random_str()
access = random_str()
secret = random_str()
user = user_factory()
globaldns_provider = \
user.client.create_global_dns_provider(
name=provider_name,
rootDomain="example.com",
route53ProviderConfig={
'accessKey': access,
'secretKey': secret})
remove_resource(globaldns_provider)
def wait_to_ensure_user_in_rb_subject(api, name,
userId, timeout=60):
found = False
interval = 0.5
start = time.time()
while not found:
time.sleep(interval)
interval *= 2
try:
rb = api.read_namespaced_role_binding(name, "cattle-global-data")
            for subject in rb.subjects:
                if subject.name == userId:
                    found = True
except kubernetes.client.rest.ApiException:
found = False
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for user to get added to rb")
def wait_for_gdns_update(admin_mc, gdns_entry_name, new_fqdn, timeout=60):
client = admin_mc.client
updated = False
interval = 0.5
start = time.time()
id = "cattle-global-data:" + gdns_entry_name
while not updated:
if time.time() - start > timeout:
raise Exception('Timeout waiting for gdns entry to update')
gdns = client.by_id_global_dns(id)
if gdns is not None and gdns.fqdn == new_fqdn:
updated = True
time.sleep(interval)
interval *= 2
def wait_for_gdns_entry_creation(admin_mc, gdns_name, timeout=60):
start = time.time()
interval = 0.5
client = admin_mc.client
found = False
while not found:
if time.time() - start > timeout:
raise Exception('Timeout waiting for globalDNS entry creation')
gdns = client.list_global_dns(name=gdns_name)
if len(gdns) > 0:
found = True
time.sleep(interval)
interval *= 2
def test_cloudflare_provider_proxy_setting(admin_mc, remove_resource):
client = admin_mc.client
provider_name = random_str()
apiEmail = random_str()
apiKey = random_str()
globaldns_provider = \
client.create_global_dns_provider(
name=provider_name,
rootDomain="example.com",
cloudflareProviderConfig={
'proxySetting': True,
'apiEmail': apiEmail,
'apiKey': apiKey})
gdns_provider_id = "cattle-global-data:" + provider_name
gdns_provider = client.by_id_global_dns_provider(gdns_provider_id)
assert gdns_provider is not None
assert gdns_provider.cloudflareProviderConfig.proxySetting is True
remove_resource(globaldns_provider)
def test_dns_fqdn_hostname(admin_mc, remove_resource):
client = admin_mc.client
provider_name = random_str()
access = random_str()
secret = random_str()
globaldns_provider = \
client.create_global_dns_provider(
name=provider_name,
rootDomain="example.com",
route53ProviderConfig={
'accessKey': access,
'secretKey': secret})
remove_resource(globaldns_provider)
fqdn = random_str() + ".example!!!*.com"
with pytest.raises(ApiError) as e:
client.create_global_dns(fqdn=fqdn, providerId=provider_name)
assert e.value.error.status == 422
def test_globaldnsprovider_secret(admin_mc, remove_resource):
client = admin_mc.client
provider_name = random_str()
access_key = random_str()
secret_key = random_str()
globaldns_provider = \
client.create_global_dns_provider(
name=provider_name,
rootDomain="example.com",
route53ProviderConfig={
'accessKey': access_key,
'secretKey': secret_key})
# Test password not present in api
assert globaldns_provider is not None
assert globaldns_provider.route53ProviderConfig.get('secretKey') is None
crdClient, k8sclient = getClients(admin_mc)
ns, name = globaldns_provider["id"].split(":")
# Test password is in k8s secret after creation
verifyGDNSPassword(crdClient, k8sclient, ns, name, secret_key)
# Test updating password
newSecretPassword = random_str()
_ = client.update(globaldns_provider, route53ProviderConfig={
'accessKey': access_key,
'secretKey': newSecretPassword})
verifyGDNSPassword(crdClient, k8sclient, ns, name, newSecretPassword)
def getClients(admin_mc):
return CustomObjectsApi(admin_mc.k8s_client), \
CoreV1Api(admin_mc.k8s_client)
def verifyGDNSPassword(crdClient, k8sclient, ns, name, secretPassword):
k8es = crdClient.get_namespaced_custom_object(
"management.cattle.io", "v3", ns, 'globaldnsproviders', name)
secretName = k8es['spec']['route53ProviderConfig']['secretKey']
ns, name = secretName.split(":")
assert ns is not None
assert name is not None
secret = k8sclient.read_namespaced_secret(name, ns)
assert base64.b64decode(secret.data[name]).\
decode("utf-8") == secretPassword
| 12,409 | 35.824926 | 77 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_pod_security_policies.py
|
import kubernetes
from .conftest import kubernetes_api_client, wait_for, set_cluster_psp
from .common import random_str
from rancher import ApiError
import pytest
from kubernetes.client.rest import ApiException
def cleanup_pspt(client, request, cluster):
def remove_pspt_from_cluster_and_delete(cluster):
pspt_id = cluster.defaultPodSecurityPolicyTemplateId
pspt = client.by_id_pod_security_policy_template(pspt_id)
cluster.defaultPodSecurityPolicyTemplateId = ""
client.update_by_id_cluster(cluster.id, cluster)
client.delete(pspt)
request.addfinalizer(
lambda: remove_pspt_from_cluster_and_delete(cluster)
)
def create_pspt(client):
""" Creates a minimally valid pspt with cleanup left to caller"""
runas = {"rule": "RunAsAny"}
selinx = {"rule": "RunAsAny"}
supgrp = {"ranges": [{"max": 65535, "min": 1}],
"rule": "MustRunAs"
}
fsgrp = {"ranges": [{"max": 65535, "min": 1, }],
"rule": "MustRunAs",
}
pspt = \
client.create_pod_security_policy_template(name="test" + random_str(),
description="Test PSPT",
privileged=False,
seLinux=selinx,
supplementalGroups=supgrp,
runAsUser=runas,
fsGroup=fsgrp,
volumes='*'
)
return pspt
def setup_cluster_with_pspt(client, request):
"""
Sets the 'local' cluster to mock a PSP by applying a minimally valid
restricted type PSPT
"""
pspt = create_pspt(client)
pspt_id = pspt.id
# this won't enforce pod security policies on the local cluster but it
# will let us test that the role bindings are being created correctly
cluster = client.by_id_cluster("local")
setattr(cluster, "defaultPodSecurityPolicyTemplateId", pspt_id)
client.update_by_id_cluster("local", cluster)
cleanup_pspt(client, request, cluster)
return pspt
def service_account_has_role_binding(rbac, pspt):
try:
rbac.read_namespaced_role_binding("default-asdf-default-" + pspt.id +
"-clusterrole-binding", "default")
return True
except ApiException:
return False
def test_service_accounts_have_role_binding(admin_mc, request):
api_client = admin_mc.client
pspt = setup_cluster_with_pspt(api_client, request)
k8s_client = kubernetes_api_client(admin_mc.client, 'local')
core = kubernetes.client.CoreV1Api(api_client=k8s_client)
rbac = kubernetes.client.RbacAuthorizationV1Api(api_client=k8s_client)
service_account = kubernetes.client.V1ServiceAccount()
service_account.metadata = kubernetes.client.V1ObjectMeta()
service_account.metadata.name = "asdf"
core.create_namespaced_service_account("default", service_account)
request.addfinalizer(lambda: core.delete_namespaced_service_account(
"asdf", "default"))
request.addfinalizer(
lambda: rbac.delete_namespaced_role_binding(
"default-asdf-default-" + pspt.id + "-clusterrole-binding",
"default"))
wait_for(lambda: service_account_has_role_binding(rbac, pspt), timeout=30)
@pytest.mark.nonparallel
def test_pod_security_policy_template_del(admin_mc, admin_pc, remove_resource,
restore_cluster_psp):
""" Test for pod security policy template binding correctly.
May have to mark this test as nonparallel if new test are introduced
that toggle pspEnabled.
ref https://github.com/rancher/rancher/issues/15728
ref https://localhost:8443/v3/podsecuritypolicytemplates
"""
api_client = admin_mc.client
pspt_proj = create_pspt(api_client)
# add a finalizer to delete the pspt
remove_resource(pspt_proj)
# creates a project and handles cleanup
proj = admin_pc.project
# this will retry 3 times if there is an ApiError
set_cluster_psp(admin_mc, "false")
with pytest.raises(ApiError) as e:
api_client.action(obj=proj,
action_name="setpodsecuritypolicytemplate",
podSecurityPolicyTemplateId=pspt_proj.id)
assert e.value.error.status == 422
assert "cluster [local] does not have Pod Security Policies enabled" in \
e.value.error.message
set_cluster_psp(admin_mc, "true")
api_client.action(obj=proj, action_name="setpodsecuritypolicytemplate",
podSecurityPolicyTemplateId=pspt_proj.id)
proj = api_client.wait_success(proj)
# Check that project was created successfully with pspt
assert proj.state == 'active'
assert proj.podSecurityPolicyTemplateId == pspt_proj.id
def check_psptpb():
proj_obj = proj.podSecurityPolicyTemplateProjectBindings()
for data in proj_obj.data:
if (data.targetProjectId == proj.id and
data.podSecurityPolicyTemplateId == pspt_proj.id):
return True
return False
wait_for(check_psptpb, lambda: "PSPTB project binding not found")
# allow for binding deletion
api_client.delete(proj)
def check_project():
return api_client.by_id_project(proj.id) is None
wait_for(check_project)
# delete the PSPT that was associated with the deleted project
api_client.delete(pspt_proj)
def pspt_del_check():
if api_client.by_id_pod_security_policy_template(pspt_proj.id) is None:
return True
else: # keep checking to see delete occurred
return False
# will timeout if pspt is not deleted
wait_for(pspt_del_check)
assert api_client.by_id_pod_security_policy_template(pspt_proj.id) is None
set_cluster_psp(admin_mc, "false")
def test_incorrect_pspt(admin_mc, remove_resource):
""" Test that incorrect pod security policy templates cannot be created"""
api_client = admin_mc.client
name = "pspt" + random_str()
with pytest.raises(ApiError) as e:
api_client.create_podSecurityPolicyTemplate(name=name)
assert e.value.error.status == 422
name = "pspt" + random_str()
with pytest.raises(ApiError) as e:
args = {'name': name,
'description': 'Test PSPT',
'fsGroup': {"rule": "RunAsAny"},
'runAsUser': {"rule": "RunAsAny"},
'seLinux': {"rule": "RunAsAny"},
'supplementalGroups': {"rule": "RunAsAny"},
'allowPrivilegeEscalation': False,
'defaultAllowPrivilegeEscalation': True}
        # defaultAllowPrivilegeEscalation must not be True when
        # allowPrivilegeEscalation is False
api_client.create_podSecurityPolicyTemplate(**args)
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidBodyContent'
def test_pspt_binding(admin_mc, admin_pc, remove_resource):
"""Test that a PSPT binding is validated before creating it"""
api_client = admin_mc.client
# No podSecurityPolicyTemplateId causes a 422
name = random_str()
with pytest.raises(ApiError) as e:
b = api_client.create_podSecurityPolicyTemplateProjectBinding(
name=name,
namespaceId='default',
podSecurityPolicyTemplateId=None,
targetProjectId=admin_pc.project.id,
)
remove_resource(b)
assert e.value.error.status == 422
assert e.value.error.message == \
'missing required podSecurityPolicyTemplateId'
# An invalid podSecurityPolicyTemplateId causes a 422
name = random_str()
with pytest.raises(ApiError) as e:
b = api_client.create_podSecurityPolicyTemplateProjectBinding(
name=name,
namespaceId='default',
podSecurityPolicyTemplateId='thisdoesntexist',
targetProjectId=admin_pc.project.id,
)
remove_resource(b)
assert e.value.error.status == 422
assert e.value.error.message == 'podSecurityPolicyTemplate not found'
@pytest.mark.nonparallel
def test_project_action_set_pspt(admin_mc, admin_pc,
remove_resource, restore_cluster_psp):
"""Test project's action: setpodsecuritypolicytemplate"""
api_client = admin_mc.client
# these create a mock pspt
pspt_proj = create_pspt(api_client)
# add a finalizer to delete the pspt
remove_resource(pspt_proj)
# creates a project
proj = admin_pc.project
set_cluster_psp(admin_mc, "false")
# Check 1: the action should error out if psp is disabled at cluster level
with pytest.raises(ApiError) as e:
api_client.action(obj=proj,
action_name="setpodsecuritypolicytemplate",
podSecurityPolicyTemplateId=pspt_proj.id)
assert e.value.error.status == 422
assert "cluster [local] does not have Pod Security Policies enabled" in \
e.value.error.message
set_cluster_psp(admin_mc, "true")
# Check 2: the action should succeed if psp is enabled at cluster level
# and podSecurityPolicyTemplateId is valid
api_client.action(obj=proj,
action_name="setpodsecuritypolicytemplate",
podSecurityPolicyTemplateId=pspt_proj.id)
proj = api_client.wait_success(proj)
assert proj.state == 'active'
assert proj.podSecurityPolicyTemplateId == pspt_proj.id
def check_psptpb():
proj_obj = proj.podSecurityPolicyTemplateProjectBindings()
for data in proj_obj.data:
if (data.targetProjectId == proj.id and
data.podSecurityPolicyTemplateId == pspt_proj.id):
return True
return False
wait_for(check_psptpb, lambda: "PSPTB project binding not found")
# Check 3: an invalid podSecurityPolicyTemplateId causes 422
with pytest.raises(ApiError) as e:
api_client.action(obj=proj,
action_name="setpodsecuritypolicytemplate",
podSecurityPolicyTemplateId="doNotExist")
assert e.value.error.status == 422
assert "podSecurityPolicyTemplate [doNotExist] not found" in \
e.value.error.message
api_client.delete(proj)
def check_project():
return api_client.by_id_project(proj.id) is None
wait_for(check_project)
set_cluster_psp(admin_mc, "false")
def test_psp_annotations(admin_mc, remove_resouce_func):
"""Test that a psp with a pspt owner annotation will get cleaned up if the
parent pspt does not exist"""
k8s_client = kubernetes_api_client(admin_mc.client, 'local')
policy = kubernetes.client.PolicyV1beta1Api(api_client=k8s_client)
psp_name = random_str()
args = {
'metadata': {
'name': psp_name
},
'spec': {
"allowPrivilegeEscalation": True,
"fsGroup": {
"rule": "RunAsAny"
},
"runAsUser": {
"rule": "RunAsAny"
},
"seLinux": {
"rule": "RunAsAny"
},
"supplementalGroups": {
"rule": "RunAsAny"
},
"volumes": [
"*"
]
}
}
psp = policy.create_pod_security_policy(args)
remove_resouce_func(policy.delete_pod_security_policy, psp_name)
psp = policy.read_pod_security_policy(psp_name)
assert psp is not None
anno = {
'metadata': {
'annotations': {
'serviceaccount.cluster.cattle.io/pod-security': 'doesntexist'
}
}
}
# Add the annotation the controller is looking for
psp = policy.patch_pod_security_policy(psp_name, anno)
# Controller will delete the PSP as the parent PSPT doesn't exist
def _get_psp():
try:
policy.read_pod_security_policy(psp_name)
return False
except ApiException as e:
if e.status != 404:
raise e
return True
wait_for(_get_psp, fail_handler=lambda: "psp was not cleaned up")
with pytest.raises(ApiException) as e:
policy.read_pod_security_policy(psp_name)
assert e.value.status == 404
| 12,543 | 35.254335 | 79 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_system_app_creator.py
|
from .common import random_str
import time
def test_system_app_creator(admin_mc, admin_system_pc, remove_resource):
client = admin_mc.client
provider_name = random_str()
access = random_str()
secret = random_str()
globaldns_provider = \
client.create_global_dns_provider(
name=provider_name,
rootDomain="example.com",
route53ProviderConfig={
'accessKey': access,
'secretKey': secret})
remove_resource(globaldns_provider)
app = wait_for_system_app(
admin_system_pc.client,
"systemapp-"+globaldns_provider.name)
# the creator id of system app won't be listed in api
assert app.creatorId != globaldns_provider.creatorId
def wait_for_system_app(client, name, timeout=60):
start = time.time()
interval = 0.5
apps = client.list_app(name=name)
while len(apps.data) != 1:
if time.time() - start > timeout:
print(apps)
            raise Exception('Timeout waiting for system app')
time.sleep(interval)
interval *= 2
apps = client.list_app(name=name)
return apps.data[0]
| 1,308 | 34.378378 | 72 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_deployment.py
|
from .common import random_str
import kubernetes
from .conftest import kubernetes_api_client, user_project_client
def test_dep_creation_kubectl(admin_mc, admin_cc, remove_resource):
name = random_str()
project = admin_mc.client.create_project(name=random_str(),
clusterId='local')
remove_resource(project)
namespace_name = random_str()
ns = admin_cc.client.create_namespace(name=namespace_name,
projectId=project.id)
remove_resource(ns)
k8s_client = kubernetes_api_client(admin_mc.client, 'local')
d_api = kubernetes.client.AppsV1Api(k8s_client)
d = kubernetes.client.V1beta2Deployment()
# Metadata
d.metadata = kubernetes.client.V1ObjectMeta(
name=name,
namespace=namespace_name)
pod_meta = kubernetes.client.V1ObjectMeta(
labels={"foo": "bar"})
port = kubernetes.client.V1ContainerPort(
container_port=80,
host_port=8099,
)
container = {"name": "nginx", "image": "nginx:1.7.9", "ports": [port]}
spec = kubernetes.client.V1PodSpec(
containers=[container])
template = kubernetes.client.V1PodTemplateSpec(
metadata=pod_meta,
spec=spec
)
selector = kubernetes.client.V1LabelSelector(
match_labels={"foo": "bar"}
)
d.spec = kubernetes.client.V1beta2DeploymentSpec(
selector=selector,
template=template
)
dep = d_api.create_namespaced_deployment(namespace=namespace_name,
body=d)
remove_resource(dep)
assert dep is not None
    # now fetch this deployment through the rancher api as a workload
p_client = user_project_client(admin_mc, project)
d = p_client.list_workload(name=name, namespace=namespace_name).data[0]
assert d is not None
port = d['containers'][0]['ports'][0]
assert port['sourcePort'] == 8099
assert port['kind'] == 'HostPort'
def test_port(admin_pc):
client = admin_pc.client
ports = [{
'sourcePort': 776,
'containerPort': 80,
'kind': 'HostPort',
'protocol': 'TCP', },
{
'sourcePort': 777,
'containerPort': 80,
'kind': 'NodePort',
'protocol': 'TCP', },
{
'sourcePort': 778,
'containerPort': 80,
'kind': 'LoadBalancer',
'protocol': 'TCP', },
{
'sourcePort': 779,
'containerPort': 80,
'kind': 'ClusterIP',
'protocol': 'TCP', },
]
for port in ports:
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=admin_pc.
project.id)
name = random_str()
workload = client.create_workload(
name=name,
namespaceId=ns.id,
scale=1,
containers=[{
'name': 'one',
'image': 'nginx',
'ports': [port],
}])
workload_ports = workload['containers'][0]['ports']
assert workload_ports is not None
assert workload_ports[0]['kind'] == port['kind']
assert workload_ports[0]['containerPort'] == port['containerPort']
assert workload_ports[0]['sourcePort'] == port['sourcePort']
| 3384 | 31.238095 | 75 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_secrets.py
|
from .common import random_str
import kubernetes
from .conftest import kubernetes_api_client, user_project_client
CERT = """-----BEGIN CERTIFICATE-----
MIIDEDCCAfgCCQC+HwE8rpMN7jANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJV
UzEQMA4GA1UECBMHQXJpem9uYTEVMBMGA1UEChMMUmFuY2hlciBMYWJzMRIwEAYD
VQQDEwlsb2NhbGhvc3QwHhcNMTYwNjMwMDExMzMyWhcNMjYwNjI4MDExMzMyWjBK
MQswCQYDVQQGEwJVUzEQMA4GA1UECBMHQXJpem9uYTEVMBMGA1UEChMMUmFuY2hl
ciBMYWJzMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IB
DwAwggEKAoIBAQC1PR0EiJjM0wbFQmU/yKSb7AuQdzhdW02ya+RQe+31/B+sOTMr
z9b473KCKf8LiFKFOIQUhR5fPvwyrrIWKCEV9pCp/wM474fX32j0zYaH6ezZjL0r
L6hTeGFScGse3dk7ej2+6nNWexpujos0djFi9Gu11iVHIJyT2Sx66kPPPZVRkJO9
5Pfetm5SLIQtJHUwy5iWv5Br+AbdXlUAjTYUqS4mhKIIbblAPbOKrYRxGXX/6oDV
J5OGLle8Uvlb8poxqmy67FPyMObNHhjggKwboXhmNuuT2OGf/VeZANMYubs4JP2V
ZLs3U/1tFMAOaQM+PbT9JuwMSmGYFX0Qiuh/AgMBAAEwDQYJKoZIhvcNAQEFBQAD
ggEBACpkRCQpCn/zmTOwboBckkOFeqMVo9cvSu0Sez6EPED4WUv/6q5tlJeHekQm
6YVcsXeOMkpfZ7qtGmBDwR+ly7D43dCiPKplm0uApO1CkogG5ePv0agvKHEybd36
xu9pt0fnxDdrP2NrP6trHq1D+CzPZooLRfmYqbt1xmIb00GpnyiJIUNuMu7GUM3q
NxWGK3eq+1cyt6xr8nLOC5zaGeSyZikw4+9vqLudNSyYdnw9mdHtrYT0GlcEP1Vc
NK+yrhDCvEWH6+4+pp8Ve2P2Le5tvbA1m24AxyuC9wHS5bUmiNHweLXNpxLFTjK8
BBUi6y1Vm9jrDi/LiiHcN4sJEoU=
-----END CERTIFICATE-----"""
KEY = """-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAtT0dBIiYzNMGxUJlP8ikm+wLkHc4XVtNsmvkUHvt9fwfrDkz
K8/W+O9ygin/C4hShTiEFIUeXz78Mq6yFighFfaQqf8DOO+H199o9M2Gh+ns2Yy9
Ky+oU3hhUnBrHt3ZO3o9vupzVnsabo6LNHYxYvRrtdYlRyCck9kseupDzz2VUZCT
veT33rZuUiyELSR1MMuYlr+Qa/gG3V5VAI02FKkuJoSiCG25QD2ziq2EcRl1/+qA
1SeThi5XvFL5W/KaMapsuuxT8jDmzR4Y4ICsG6F4Zjbrk9jhn/1XmQDTGLm7OCT9
lWS7N1P9bRTADmkDPj20/SbsDEphmBV9EIrofwIDAQABAoIBAGehHxN1i3EqhKeL
9FrJPh4NlPswwCDZUQ7hFDZU9lZ9qBqQxkqZ18CVIXN90eBlPVIBY7xb9Wbem9Pb
AecbYPeu+T7KmqwWgiUUEG5RikfyoMQv7gZghK3dmkBKGWYX0dtpZR7h7bsYPp/S
j5QatNhxC5l4be5CnmUHe6B4jPdUt8kRfTj0ukYGm/h3cOm/tEQeRYIIN/N6JN2Z
JWYzsyqGmlOTp7suczkRIUS0AjiljT1186bQSou62iMtMqEgArusFFb9m/dXCCYo
t/Q1SR4lRodDfzcF/CRbdR/ZC8gZlyCdbI4WHOw9IwwHnmrllx4MXFP/p6p+gEtl
cKMzHXECgYEA27KnkDnz338qKC2cCGkMf3ARfTX6gSlqmvgM9zOa8FLWp6GR6Rvo
NgVLUi63bQqv9D5qYSsweAp1QTvIxJffWMJDTWtxowOXVW5P8WJ8jp/pAXoWGRbd
pnavy6Ih0XT57huwT7fGGIikXYfw/kB85PPJL3FsT/b6G4ay2+Z7OGkCgYEA0y+d
bxUewYZkpNy7+kIh0x4vrJvNqSL9ZwiP2R159zu7zDwDph/fkhXej0FEtbXybt+O
4s9M3l4nNsY6AS9sIPCB5SxWguhx0z76U5cz1qFFZwIHtL8r1jHrl5iwkVyOAtVV
0BokmJG4Pn07yZo/iCmSTEfwcePvCMvOsPtcvKcCgYEAu5+SbKChfhBaz19MLv6P
ttHdjcIogl/9dAU9BWxj+LO2MAjS1HKJ2ICi97d/3LbQ19TqArvgs9OymZhV+Fb/
Xgzhb1+/94icmFASI8KJP0CfvCwobRrTBlO8BDsdiITO4SNyalI28kLXpCzxiiFG
yDzOZx8FcjEpHZLmctgeCWkCgYAO0rDCM0FNZBl8WOH41tt47g16mBT/Yi1XJgqy
upbs+4xa8XtwFZyjrFVKyNIBzxuNHLPyx4olsYYfGhrIKoP0a+0yIMKRva7/nNQF
Of+xePBeIo5X6XMyPZ7DrTv3d/+fw0maqbsX2mKMQE4KAIGlFQXnxMTjuZP1khiX
44zG0QKBgGwQ8T4DGZK5ukLQmhLi9npCaAW99s/uuKArMzAG9xd/I8YntM/kVY0V
VUi3lKqwXhtReYdrqVTPdjnyGIYIGGNRD7EKqQe15IRfbpy536DSN+LvL65Fdyis
iNITDKNP1H3hedFNFfbTGpueYdRX6QaptK4+NB4+dOm7hn8iqq7U
-----END RSA PRIVATE KEY-----"""
MALFORMED_CERT = """-----BEGIN CERTIFICATE-----
MIIDEDCCAfgCCQC+HwE8rpMN7jANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJV
UzEQMA4GA1UECBMHQXJpem9uYTEVMBMGA1UEChMMUmFuY2hlciBMYWJzMRIwEAYD
VQQDEwlsb2NhbGhvc3QwHhcNMTYwNjMwMDExMzMyWhcNMjYwNjI4MDExMzMyWjBK
MQswCQYDVQQGEwJVUzEQMA4GA1UECBMHQXJpem9uYTEVMBMGA1UEChMMUmFuY2hl
ciBMYWJzMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IB
DwAwggEKAoIBAQC1PR0EiJjM0wbFQmU/yKSb7AuQdzhdW02ya+RQe+31/B+sOTMr
z9b473KCKf8LiFKFOxyuC9wHS5bUmiNHweLXNpxLFTjK8
BBUi6y1Vm9jrDi/LiiHcN4sJEoU=
-----END CERTIFICATE-----"""
def test_secrets(admin_pc):
client = admin_pc.client
name = random_str()
secret = client.create_secret(name=name, stringData={
'foo': 'bar'
})
assert secret.type == 'secret'
assert secret.kind == 'Opaque'
assert secret.name == name
assert secret.data.foo == 'YmFy'
secret.data.baz = 'YmFy'
secret = client.update(secret, data=secret.data)
secret = client.reload(secret)
assert secret.baseType == 'secret'
assert secret.type == 'secret'
assert secret.kind == 'Opaque'
assert secret.name == name
assert secret.data.foo == 'YmFy'
assert secret.data.baz == 'YmFy'
assert secret.namespaceId is None
assert 'namespace' not in secret.data
assert secret.projectId == admin_pc.project.id
found = False
for i in client.list_secret():
if i.id == secret.id:
found = True
break
assert found
client.delete(secret)
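# Note on the 'YmFy' literals above: Kubernetes stores secret data
# base64-encoded, so stringData {'foo': 'bar'} comes back as 'YmFy'.
# A minimal sanity check of that encoding (standard library only):
import base64
assert base64.b64encode(b'bar').decode() == 'YmFy'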
def test_certificates(admin_pc):
client = admin_pc.client
name = random_str()
cert = client.create_certificate(name=name, key=KEY, certs=CERT)
assert cert.baseType == 'secret'
assert cert.expiresAt == '2026-06-28T01:13:32Z'
assert cert.type == 'certificate'
assert cert.name == name
assert cert.certs == CERT
assert cert.namespaceId is None
assert 'namespace' not in cert
# cert = client.update(cert, certs='certdata2')
# cert = client.reload(cert)
#
# assert cert.baseType == 'secret'
# assert cert.type == 'certificate'
# assert cert.name == name
# assert cert.certs == 'certdata2'
# assert cert.namespaceId is None
# assert 'namespace' not in cert
# assert cert.projectId == pc.project.id
found = False
for i in client.list_certificate():
if i.id == cert.id:
found = True
break
assert found
cert = client.by_id_certificate(cert.id)
assert cert is not None
client.delete(cert)
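# The expiresAt asserted above is the certificate's notAfter timestamp.
# A sketch of verifying it locally, assuming a recent third-party
# 'cryptography' package (not a dependency of this test):
def _cert_not_after_iso(pem):
    from cryptography import x509  # local import: optional dependency
    parsed = x509.load_pem_x509_certificate(pem.encode())
    return parsed.not_valid_after.isoformat() + 'Z'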
def test_docker_credential(admin_pc):
client = admin_pc.client
name = random_str()
registries = {'index.docker.io': {
'username': 'foo',
'password': 'bar',
}}
cert = client.create_docker_credential(name=name,
registries=registries)
assert cert.baseType == 'secret'
assert cert.type == 'dockerCredential'
assert cert.name == name
assert cert.registries['index.docker.io'].username == 'foo'
assert 'password' in cert.registries['index.docker.io']
assert 'auth' in cert.registries['index.docker.io']
assert cert.namespaceId is None
assert 'namespace' not in cert
assert cert.projectId == admin_pc.project.id
registries['two'] = {
'username': 'blah'
}
cert = client.update(cert, registries=registries)
cert = client.reload(cert)
assert cert.baseType == 'secret'
assert cert.type == 'dockerCredential'
assert cert.name == name
assert cert.registries['index.docker.io'].username == 'foo'
assert cert.registries.two.username == 'blah'
assert 'password' not in cert.registries['index.docker.io']
assert cert.namespaceId is None
assert 'namespace' not in cert
assert cert.projectId == admin_pc.project.id
found = False
for i in client.list_docker_credential():
if i.id == cert.id:
found = True
break
assert found
cert = client.by_id_docker_credential(cert.id)
assert cert is not None
client.delete(cert)
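# The 'auth' entry asserted present above conventionally holds
# base64("<username>:<password>"), mirroring ~/.docker/config.json;
# for the credentials used here that encoding is:
assert base64.b64encode(b'foo:bar').decode() == 'Zm9vOmJhcg=='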
def test_basic_auth(admin_pc):
client = admin_pc.client
name = random_str()
cert = client.create_basic_auth(name=name,
username='foo',
password='bar')
assert cert.baseType == 'secret'
assert cert.type == 'basicAuth'
assert cert.name == name
assert cert.username == 'foo'
assert 'password' in cert
assert cert.namespaceId is None
assert 'namespace' not in cert
assert cert.projectId == admin_pc.project.id
cert = client.update(cert, username='foo2')
cert = client.reload(cert)
assert cert.baseType == 'secret'
assert cert.type == 'basicAuth'
assert cert.name == name
assert cert.username == 'foo2'
assert 'password' not in cert
assert cert.namespaceId is None
assert 'namespace' not in cert
assert cert.projectId == admin_pc.project.id
found = False
for i in client.list_basic_auth():
if i.id == cert.id:
found = True
break
assert found
cert = client.by_id_basic_auth(cert.id)
assert cert is not None
client.delete(cert)
def test_ssh_auth(admin_pc):
client = admin_pc.client
name = random_str()
cert = client.create_ssh_auth(name=name,
privateKey='foo')
assert cert.baseType == 'secret'
assert cert.type == 'sshAuth'
assert cert.name == name
assert 'privateKey' in cert
assert cert.namespaceId is None
assert 'namespace' not in cert
assert cert.projectId == admin_pc.project.id
cert = client.update(cert, privateKey='foo2')
cert = client.reload(cert)
assert cert.baseType == 'secret'
assert cert.type == 'sshAuth'
assert cert.name == name
assert 'privateKey' not in cert
assert cert.namespaceId is None
assert 'namespace' not in cert
assert cert.projectId == admin_pc.project.id
found = False
for i in client.list_ssh_auth():
if i.id == cert.id:
found = True
break
assert found
cert = client.by_id_ssh_auth(cert.id)
assert cert is not None
client.delete(cert)
def test_secret_creation_kubectl(admin_mc, admin_cc, remove_resource):
name = random_str()
project = admin_mc.client.create_project(name=random_str(),
clusterId='local')
remove_resource(project)
namespace_name = random_str()
ns = admin_cc.client.create_namespace(name=namespace_name,
projectId=project.id)
remove_resource(ns)
k8s_client = kubernetes_api_client(admin_mc.client, 'local')
secrets_api = kubernetes.client.CoreV1Api(k8s_client)
secret = kubernetes.client.V1Secret()
# Metadata
secret.metadata = kubernetes.client.V1ObjectMeta(
name=name,
namespace=namespace_name)
secret.string_data = {'tls.key': KEY, 'tls.crt': CERT}
secret.type = "kubernetes.io/tls"
sec = secrets_api.create_namespaced_secret(namespace=namespace_name,
body=secret)
remove_resource(sec)
assert sec is not None
# now get this through rancher api as namespacedCertificate
cert_id = namespace_name+':'+name
proj_client = user_project_client(admin_mc, project)
cert = proj_client.by_id_namespaced_certificate(cert_id)
assert cert is not None
assert "RSA" in cert['algorithm']
assert cert['expiresAt'] is not None
assert cert['issuedAt'] is not None
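# kubernetes.io/tls secrets must carry their material under the fixed
# keys 'tls.crt' and 'tls.key'. The same secret could be built with
# pre-encoded 'data' instead of 'string_data' (a sketch, unused here):
def _tls_secret_data(cert_pem, key_pem):
    return {
        'tls.crt': base64.b64encode(cert_pem.encode()).decode(),
        'tls.key': base64.b64encode(key_pem.encode()).decode(),
    }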
def test_malformed_secret_parse(admin_mc, admin_cc, remove_resource):
name = random_str()
project = admin_mc.client.create_project(name=random_str(),
clusterId='local')
remove_resource(project)
namespace_name = random_str()
ns = admin_cc.client.create_namespace(name=namespace_name,
projectId=project.id)
remove_resource(ns)
k8s_client = kubernetes_api_client(admin_mc.client, 'local')
secrets_api = kubernetes.client.CoreV1Api(k8s_client)
secret = kubernetes.client.V1Secret()
# Metadata
secret.metadata = kubernetes.client.V1ObjectMeta(
name=name,
namespace=namespace_name)
secret.string_data = {'tls.key': KEY, 'tls.crt': MALFORMED_CERT}
secret.type = "kubernetes.io/tls"
sec = secrets_api.create_namespaced_secret(namespace=namespace_name,
body=secret)
remove_resource(sec)
assert sec is not None
# now get this through rancher api as namespacedCertificate
cert_id = namespace_name+':'+name
proj_client = user_project_client(admin_mc, project)
cert = proj_client.by_id_namespaced_certificate(cert_id)
assert cert is not None
| 11788 | 33.775811 | 72 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_cluster_catalog.py
|
from .conftest import wait_until, wait_until_available
from rancher import ApiError
from .common import random_str
import time
import pytest
def test_cluster_catalog_creation(admin_mc, remove_resource,
user_factory):
client = admin_mc.client
# When cluster-owner tries to create cluster catalog, it should succeed
crtb_owner = client.create_cluster_role_template_binding(
clusterId="local",
roleTemplateId="cluster-owner",
userId=admin_mc.user.id,)
remove_resource(crtb_owner)
wait_until(crtb_cb(client, crtb_owner))
cluster_owner_client = admin_mc.client
name = random_str()
template_name = "local:"+name
url = "https://github.com/mrajashree/charts.git"
cluster_catalog = \
cluster_owner_client.create_cluster_catalog(name=name,
branch="onlyOne",
url=url,
clusterId="local",
)
wait_for_clustercatalog_template_to_be_created(cluster_owner_client,
template_name)
cc = cluster_owner_client.list_cluster_catalog(name=name)
assert len(cc) == 1
templates = \
cluster_owner_client.list_template(clusterCatalogId=template_name)
assert len(templates) == 1
# Create a user and add to the "local" cluster as "cluster-member"
# cluster-member should be able to list cluster catalog and its templates
user1 = user_factory()
remove_resource(user1)
crtb_member = client.create_cluster_role_template_binding(
clusterId="local",
roleTemplateId="cluster-member",
userId=user1.user.id)
remove_resource(crtb_member)
wait_until(crtb_cb(client, crtb_member))
# wait_until_available(client, crtb_member)
cluster_member_client = user1.client
cc = cluster_member_client.list_cluster_catalog(name=name)
assert len(cc) == 1
# Both should also be able to list templates of the cluster catalog
templates = \
cluster_member_client.list_template(clusterCatalogId=template_name)
assert len(templates) == 1
# But cluster-member should not be able to create a cluster catalog
    with pytest.raises(ApiError) as e:
        cluster_member_client.create_cluster_catalog(name=random_str(),
                                                     branch="onlyOne",
                                                     url=url,
                                                     clusterId="local",
                                                     )
    assert e.value.error.status == 403
# Create another user and don't add to cluster, this user should not
# be able to access this cluster catalog or its templates
user2 = user_factory()
templates = \
user2.client.list_template(clusterCatalogId=template_name)
assert len(templates) == 0
cc = user2.client.list_cluster_catalog(name=name)
assert len(cc) == 0
client.delete(cluster_catalog)
wait_for_clustercatalog_template_to_be_deleted(client, template_name)
def test_cluster_catalog_templates_access(admin_mc, user_factory,
remove_resource, admin_pc):
# Cluster-owner,cluster-member, and all project-owners/members
# in that cluster should have access to cluster catalog's templates
# First add a user as cluster member to this cluster
user1 = user_factory()
remove_resource(user1)
admin_client = admin_mc.client
crtb_member = admin_client.create_cluster_role_template_binding(
clusterId="local",
roleTemplateId="cluster-member",
userId=user1.user.id)
remove_resource(crtb_member)
wait_until(crtb_cb(admin_client, crtb_member))
# cluster roles should be able to list global catalog
# so that it shows up in dropdown on the app launch page
c = user1.client.list_catalog(name="library")
assert len(c) == 1
# Now create a cluster catalog
name = random_str()
catalog_name = "local:" + name
url = "https://github.com/mrajashree/charts.git"
cc = admin_client.create_cluster_catalog(name=name,
branch="onlyOne",
url=url,
clusterId="local",
)
wait_for_clustercatalog_template_to_be_created(admin_client, catalog_name)
# Now add a user to a project within this cluster as project-owner
user2 = user_factory()
remove_resource(user2)
prtb_owner = admin_client.create_project_role_template_binding(
userId=user2.user.id,
roleTemplateId="project-owner",
projectId=admin_pc.project.id,
)
remove_resource(prtb_owner)
wait_until(prtb_cb(admin_client, prtb_owner))
wait_until_available(admin_client, prtb_owner)
project_owner_client = user2.client
templates = \
project_owner_client.list_template(clusterCatalogId=catalog_name)
assert len(templates) == 1
templateversions = \
project_owner_client.list_template(clusterCatalogId=catalog_name)
assert len(templateversions) == 1
# project roles should be able to list global and cluster catalogs
# so that they show up in dropdown on the app launch page
c = project_owner_client.list_catalog(name="library")
assert len(c) == 1
cluster_cat = project_owner_client.list_cluster_catalog(name=name)
assert len(cluster_cat) == 1
    # but project-owners shouldn't have cud permissions for cluster catalog
    # create must fail
    with pytest.raises(ApiError) as e:
        project_owner_client.create_cluster_catalog(name=random_str(),
                                                    branch="onlyOne",
                                                    url=url,
                                                    clusterId="local",
                                                    )
    assert e.value.error.status == 403
    # delete must fail
    with pytest.raises(ApiError) as e:
        project_owner_client.delete(cc)
    assert e.value.error.status == 403
    # update must fail
    with pytest.raises(ApiError) as e:
        project_owner_client.update(cc, branch="master")
    assert e.value.error.status == 403
cluster_member_client = user1.client
templates = \
cluster_member_client.list_template(clusterCatalogId=catalog_name)
assert len(templates) == 1
templateversions = \
cluster_member_client.list_template(clusterCatalogId=catalog_name)
assert len(templateversions) == 1
# Now remove user1 also from the cluster, this should mean user1 should
# no longer be able to access the catalog and templates
admin_client.delete(crtb_member)
wait_for_clustercatalog_template_to_be_deleted(user1.client, catalog_name,
120)
# Now remove the user admin_pc from the project of this cluster,
# so admin_pc should no longer have access to catalog and templates
admin_client.delete(prtb_owner)
wait_for_clustercatalog_template_to_be_deleted(user2.client, catalog_name,
120)
templateversions = \
user2.client.list_template(clusterCatalogId=catalog_name)
assert len(templateversions) == 0
admin_client.delete(cc)
wait_for_clustercatalog_template_to_be_deleted(admin_client, catalog_name,
120)
def wait_for_clustercatalog_template_to_be_created(client, name, timeout=45):
found = False
start = time.time()
interval = 0.5
while not found:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for templates")
templates = client.list_template(clusterCatalogId=name)
if len(templates) > 0:
found = True
time.sleep(interval)
interval *= 2
def wait_for_clustercatalog_template_to_be_deleted(client, name, timeout=60):
found = False
start = time.time()
interval = 0.5
while not found:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for templates")
templates = client.list_template(clusterCatalogId=name)
if len(templates) == 0:
found = True
time.sleep(interval)
interval *= 2
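# Both wait helpers above are instances of the same poll-with-backoff
# pattern; a generic form could look like this (a sketch, not used by
# these tests):
def _poll(predicate, timeout=60, interval=0.5):
    start = time.time()
    while not predicate():
        if time.time() - start > timeout:
            raise AssertionError("Timed out waiting for condition")
        time.sleep(interval)
        interval *= 2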
def crtb_cb(client, crtb):
"""Wait for the crtb to have the userId populated"""
def cb():
c = client.reload(crtb)
return c.userPrincipalId is not None
return cb
def prtb_cb(client, prtb):
"""Wait for the crtb to have the userId populated"""
def cb():
p = client.reload(prtb)
return p.userPrincipalId is not None
return cb
| 8948 | 36.2875 | 78 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_features.py
|
import pytest
from rancher import ApiError
# no one should be able to create features via the api
def test_cannot_create(admin_mc, user_mc, remove_resource):
admin_client = admin_mc.client
user_client = user_mc.client
with pytest.raises(ApiError) as e:
admin_client.create_feature(name="testfeature", value=True)
assert e.value.error.status == 405
with pytest.raises(ApiError) as e:
user_client.create_feature(name="testfeature", value=True)
assert e.value.error.status == 405
# users and admins should be able to list features
def test_can_list(admin_mc, user_mc, remove_resource):
user_client = user_mc.client
user_client.list_feature()
assert True
admin_client = admin_mc.client
admin_client.list_feature()
assert True
| 795 | 25.533333 | 67 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_auth_configs.py
|
import pytest
from rancher import ApiError
from kubernetes.client import CoreV1Api, CustomObjectsApi
from .conftest import wait_for
def test_auth_configs(admin_mc):
client = admin_mc.client
with pytest.raises(AttributeError) as e:
client.list_github_config()
with pytest.raises(AttributeError) as e:
client.create_auth_config({})
configs = client.list_auth_config()
assert configs.pagination.total == 14
gh = None
local = None
ad = None
azure = None
openldap = None
freeIpa = None
ping = None
adfs = None
keycloak = None
okta = None
googleoauth = None
shibboleth = None
oidc = None
keycloakoidc = None
for c in configs:
print(c)
if c.type == "githubConfig":
gh = c
elif c.type == "localConfig":
local = c
elif c.type == "activeDirectoryConfig":
ad = c
elif c.type == "azureADConfig":
azure = c
elif c.type == "openLdapConfig":
openldap = c
elif c.type == "freeIpaConfig":
freeIpa = c
elif c.type == "pingConfig":
ping = c
elif c.type == "adfsConfig":
adfs = c
elif c.type == "keyCloakConfig":
keycloak = c
elif c.type == "oktaConfig":
okta = c
elif c.type == "googleOauthConfig":
googleoauth = c
elif c.type == "shibbolethConfig":
shibboleth = c
elif c.type == "oidcConfig":
oidc = c
elif c.type == "keyCloakOIDCConfig":
keycloakoidc = c
    for x in [gh, local, ad, azure, openldap,
              freeIpa, ping, adfs, keycloak, okta, googleoauth,
              shibboleth, oidc, keycloakoidc]:
assert x is not None
config = client.by_id_auth_config(x.id)
with pytest.raises(ApiError) as e:
client.delete(config)
assert e.value.error.status == 405
assert gh.actions.testAndApply
assert gh.actions.configureTest
assert ad.actions.testAndApply
assert azure.actions.testAndApply
assert azure.actions.configureTest
assert openldap.actions.testAndApply
assert freeIpa.actions.testAndApply
assert ping.actions.testAndEnable
assert adfs.actions.testAndEnable
assert keycloak.actions.testAndEnable
assert okta.actions.testAndEnable
assert googleoauth.actions.configureTest
assert googleoauth.actions.testAndApply
assert shibboleth.actions.testAndEnable
assert oidc.actions.configureTest
assert oidc.actions.testAndApply
def test_auth_config_secrets(admin_mc):
client = admin_mc.client
key_data = {
"spKey": "-----BEGIN PRIVATE KEY-----",
}
ping_config = client.by_id_auth_config("ping")
client.update(ping_config, key_data)
k8sclient = CoreV1Api(admin_mc.k8s_client)
# wait for ping secret to get created
wait_for(lambda: key_secret_creation(k8sclient), timeout=60,
fail_handler=lambda: "failed to create secret for ping spKey")
secrets = k8sclient.list_namespaced_secret("cattle-global-data")
auth_configs_not_setup = ["adfsconfig-spkey", "oktaconfig-spkey",
"keycloakconfig-spkey"]
for s in secrets.items:
assert s.metadata.name not in auth_configs_not_setup
def key_secret_creation(k8sclient):
secrets = k8sclient.list_namespaced_secret("cattle-global-data")
for s in secrets.items:
if s.metadata.name == "pingconfig-spkey":
return True
return False
def test_auth_label(admin_mc, user_factory):
user = user_factory()
k8s_client = CustomObjectsApi(admin_mc.k8s_client)
user_token = wait_for(
lambda: user_token_creation(k8s_client, user.user.id),
timeout=30,
fail_handler=lambda: "failed to find token for factory user login"
)
label_name = "authn.management.cattle.io/kind"
assert user_token["metadata"]["labels"][label_name] == "session"
def user_token_creation(k8s_client, user_id):
tokens = k8s_client.list_cluster_custom_object(
"management.cattle.io",
"v3",
"tokens"
)
user_token = [
token for token in tokens["items"] if token['userId'] == user_id
]
if len(user_token) > 0:
return user_token[0]
return False
| 4366 | 27.174194 | 75 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_global_role_bindings.py
|
import pytest
from rancher import ApiError
from kubernetes.client.rest import ApiException
from kubernetes.client import RbacAuthorizationV1Api
from .conftest import wait_for
from .common import random_str, string_to_encoding
def test_cannot_update_global_role(admin_mc, remove_resource):
"""Asserts that globalRoleId field cannot be changed"""
admin_client = admin_mc.client
grb = admin_client.create_global_role_binding(
name="gr-" + random_str(),
userId=admin_mc.user.id,
globalRoleId="nodedrivers-manage")
remove_resource(grb)
grb = admin_client.update_by_id_global_role_binding(
id=grb.id,
globalRoleId="settings-manage")
assert grb.globalRoleId == "nodedrivers-manage"
def test_globalrole_must_exist(admin_mc, remove_resource):
"""Asserts that globalRoleId must reference an existing role"""
admin_client = admin_mc.client
with pytest.raises(ApiError) as e:
grb = admin_client.create_global_role_binding(
name="gr-" + random_str(),
globalRoleId="somefakerole",
userId=admin_mc.user.id
)
remove_resource(grb)
assert e.value.error.status == 404
assert "globalroles.management.cattle.io \"somefakerole\" not found" in \
e.value.error.message
def test_cannot_update_subject(admin_mc, user_mc, remove_resource):
"""Asserts that userId and groupPrincipalId fields cannot be
changed"""
admin_client = admin_mc.client
grb = admin_client.create_global_role_binding(
name="gr-" + random_str(),
userId=admin_mc.user.id,
globalRoleId="nodedrivers-manage")
remove_resource(grb)
grb = admin_client.update_by_id_global_role_binding(
id=grb.id,
userId=user_mc.user.id)
assert grb.userId == admin_mc.user.id
grb = admin_client.update_by_id_global_role_binding(
id=grb.id,
groupPrincipalId="groupa")
assert grb.userId == admin_mc.user.id
assert grb.groupPrincipalId is None
def test_grb_crb_lifecycle(admin_mc, remove_resource):
"""Asserts that global role binding creation and deletion
properly creates and deletes underlying cluster role binding"""
admin_client = admin_mc.client
# admin role is used because it requires an
# additional cluster role bindig to be managed
grb = admin_client.create_global_role_binding(
groupPrincipalId="asd", globalRoleId="admin"
)
    remove_resource(grb)
cattle_grb = "cattle-globalrolebinding-" + grb.id
admin_grb = "globaladmin-u-" + string_to_encoding("asd").lower()
api_instance = RbacAuthorizationV1Api(
admin_mc.k8s_client)
def get_crb_by_id(id):
def get_crb_from_k8s():
try:
return api_instance.read_cluster_role_binding(id)
except ApiException as e:
assert e.status == 404
return get_crb_from_k8s
k8s_grb = wait_for(get_crb_by_id(cattle_grb))
assert k8s_grb.subjects[0].kind == "Group"
assert k8s_grb.subjects[0].name == "asd"
k8s_grb = wait_for(get_crb_by_id(admin_grb))
assert k8s_grb.subjects[0].kind == "Group"
assert k8s_grb.subjects[0].name == "asd"
grb = admin_client.reload(grb)
admin_client.delete(grb)
def crb_deleted_by_id(id):
def is_crb_deleted():
try:
api_instance.read_cluster_role_binding(id)
except ApiException as e:
return e.status == 404
return False
return is_crb_deleted
wait_for(crb_deleted_by_id(cattle_grb))
wait_for(crb_deleted_by_id(admin_grb))
def test_grb_targets_user_or_group(admin_mc, remove_resource):
"""Asserts that a globalrolebinding must exclusively target
a userId or groupPrincipalId"""
admin_client = admin_mc.client
with pytest.raises(ApiError) as e:
grb = admin_client.create_global_role_binding(
userId="asd",
groupPrincipalId="asd",
globalRoleId="admin"
)
remove_resource(grb)
assert e.value.error.status == 422
assert "must contain field [groupPrincipalId] OR field [userId]" in\
e.value.error.message
with pytest.raises(ApiError) as e:
grb = admin_client.create_global_role_binding(
globalRoleId="admin"
)
remove_resource(grb)
assert e.value.error.status == 422
assert "must contain field [groupPrincipalId] OR field [userId]" in \
e.value.error.message
| 4522 | 30.852113 | 77 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_node_templates.py
|
import pytest
import time
from .common import random_str
from .conftest import wait_for
from rancher import ApiError
from rancher import RestObject
from kubernetes.client import CustomObjectsApi
from kubernetes.client.rest import ApiException
def test_legacy_template_migrate_and_delete(admin_mc, admin_cc,
remove_resource, user_mc,
raw_remove_custom_resource):
"""Asserts that any node template not in cattle-global-nt namespace is
duplicated into cattle-global-nt, then deleted. Also, asserts that
operations on legacy node templates are forwarded to corresponding
migrated node templates"""
admin_client = admin_mc.client
admin_cc_client = admin_cc.client
user_client = user_mc.client
k8s_dynamic_client = CustomObjectsApi(admin_mc.k8s_client)
ns = admin_cc_client.create_namespace(name="ns-" + random_str(),
clusterId=admin_cc.cluster.id)
remove_resource(ns)
node_template_name = "nt-" + random_str()
body = {
"metadata": {
"name": node_template_name,
"annotations": {
"field.cattle.io/creatorId": user_mc.user.id
}
},
"kind": "NodeTemplate",
"apiVersion": "management.cattle.io/v3",
"azureConfig": {"customData": "asdfsadfsd"}
}
# create a node template that will be recognized as legacy
dynamic_nt = k8s_dynamic_client.create_namespaced_custom_object(
"management.cattle.io", "v3", ns.name, 'nodetemplates', body)
raw_remove_custom_resource(dynamic_nt)
def migrated_template_exists(id):
try:
nt = user_client.by_id_node_template(id=id)
remove_resource(nt)
return nt
except ApiError as e:
assert e.error.status == 403
return False
id = "cattle-global-nt:nt-" + ns.id + "-" + dynamic_nt["metadata"]["name"]
legacy_id = dynamic_nt["metadata"]["name"]
legacy_ns = dynamic_nt["metadata"]["namespace"]
full_legacy_id = legacy_ns + ":" + legacy_id
# wait for node template to be migrated
nt = wait_for(lambda: migrated_template_exists(id), fail_handler=lambda:
"failed waiting for node template to migrate")
# assert that config has not been removed from node template
assert nt.azureConfig["customData"] ==\
dynamic_nt["azureConfig"]["customData"]
def legacy_template_deleted():
try:
k8s_dynamic_client.get_namespaced_custom_object(
"management.cattle.io", "v3", ns.name, 'nodetemplates',
legacy_id)
return False
except ApiException as e:
return e.status == 404
# wait for legacy node template to be deleted
wait_for(lambda: legacy_template_deleted(), fail_handler=lambda:
"failed waiting for old node template to delete")
# retrieve node template via legacy id
nt = admin_client.by_id_node_template(id=full_legacy_id)
# retrieve node template via migrated id
migrated_nt = admin_client.by_id_node_template(id=id)
    def compare(d1, d2):
        """Recursively compare two node templates, skipping fields that
        necessarily differ after migration (id, namespace, etc.)"""
        if d1 == d2:
            return True
        if d1.keys() != d2.keys():
            return False
        for key in d1.keys():
            if key in ["id", "namespace", "links", "annotations"]:
                continue
            if d1[key] == d2[key]:
                continue
            if callable(d1[key]):
                continue
            if isinstance(d1[key], RestObject):
                # recurse into nested objects, comparing the field from
                # each template rather than a field against itself
                if compare(d1[key], d2[key]):
                    continue
            return False
        return True
# ensure templates returned are identical aside from fields containing
# id/ns
if not compare(nt, migrated_nt):
raise Exception("forwarded does not match migrated nodetemplate")
nt.azureConfig.customData = "asdfasdf"
new_config = nt.azureConfig
new_config.customData = "adsfasdfadsf"
# update node template via legacy id
nt = admin_client.update_by_id_node_template(
id=full_legacy_id,
azureConfig=new_config)
# assert node template is being updated
assert nt.azureConfig.customData == new_config.customData
nt2 = admin_client.by_id_node_template(id=id)
# assert node template being updated is migrated node template
assert nt2.azureConfig.customData == new_config.customData
# delete node template via legacy id
admin_client.delete(nt)
wait_for(lambda: admin_client.by_id_node_template(id) is None,
fail_handler=lambda:
"failed waiting for migrate node template to delete")
def test_node_template_namespace(admin_mc, remove_resource):
"""asserts that node template is automatically created in
'cattle-global-nt' namespace"""
admin_client = admin_mc.client
node_template = admin_client.create_node_template(name="nt-" +
random_str(),
azureConfig={})
remove_resource(node_template)
assert node_template.id.startswith("cattle-global-nt")
def test_user_can_only_view_own_template(user_factory, remove_resource):
"""Asserts that user can view template after they have created it"""
user_client1 = user_factory().client
user_client2 = user_factory().client
node_template = user_client1.create_node_template(name="nt-" +
random_str(),
azureConfig={})
remove_resource(node_template)
def can_view_template():
try:
return user_client1.by_id_node_template(id=node_template.id)
except ApiError as e:
assert e.error.status == 403
return None
wait_for(can_view_template, fail_handler=lambda:
"creator was unable to view node template")
# assert user cannot view template created by another user
ensure_user_cannot_view_template(user_client2, node_template.id)
def ensure_user_cannot_view_template(client, nodeTemplateId, timeout=5):
"""Asserts user is unable to view node template associated with given node
template id"""
can_view = False
start = time.time()
interval = 0.2
while not can_view:
if time.time() - start > timeout:
return
with pytest.raises(ApiError) as e:
client.by_id_node_template(nodeTemplateId)
assert e.value.error.status == 403
time.sleep(interval)
interval *= 2
| 6655 | 35.571429 | 78 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_etcdbackups.py
|
from .conftest import wait_until
import kubernetes
role_template = "backups-manage"
def test_backups_manage_role(admin_mc, user_factory, remove_resource):
client = admin_mc.client
restricted_user = user_factory(globalRoleId='user-base')
# add user to local cluster with "Manage cluster backups" role
crtb_rstrd = client.create_cluster_role_template_binding(
clusterId="local",
roleTemplateId=role_template,
userId=restricted_user.user.id, )
remove_resource(crtb_rstrd)
wait_until(crtb_cb(client, crtb_rstrd))
# check that role "backups-manage" was created in the cluster
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
role = rbac.read_namespaced_role(role_template, "local")
assert role is not None
assert "etcdbackups" in role.rules[0].resources
def test_standard_users_cannot_access_backups(admin_mc, user_factory):
client = admin_mc.client
user_role = client.by_id_global_role("user")
for r in user_role['rules']:
assert "etcdbackups" not in r['resources']
def crtb_cb(client, crtb):
"""Wait for the crtb to have the userId populated"""
def cb():
c = client.reload(crtb)
return c.userPrincipalId is not None
return cb
| 1269 | 30.75 | 72 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_persistent_volume_claim.py
|
import pytest
import kubernetes
from .conftest import random_str, kubernetes_api_client
from rancher import ApiError
def test_cannot_create_azure_no_accountstoragetype(admin_pc, admin_cc,
admin_mc, remove_resource):
"""Tests that a PVC referencing a storage class with empty skuName and
storageaccounttype fields fails to create
"""
client = admin_pc.client
# using k8s_client is required since rancher client will automatically
# set default if sc has no storageaccounttype/skuName
k8s_client = kubernetes_api_client(admin_mc.client, admin_cc.cluster.id)
storage_client = kubernetes.client.StorageV1Api(api_client=k8s_client)
ns = admin_pc.cluster.client.create_namespace(
name="ns" + random_str(),
projectId=admin_pc.project.id)
remove_resource(ns)
sc = storage_client.create_storage_class(
body={
"metadata": {
"name": "sc" + random_str()
},
"parameters": {
"kind": "shared"
},
"provisioner": "kubernetes.io/azure-disk"})
remove_resource(sc)
with pytest.raises(ApiError) as e:
client.create_persistent_volume_claim(
name="pc" + random_str(),
storageClassId=sc.metadata.name,
namespaceId=ns.id,
accessModes=["ReadWriteOnce"],
resources={
"requests": {
"storage": "30Gi"
}
})
assert e.value.error.status == 422
assert "must provide storageaccounttype or skuName" in \
e.value.error.message
def test_can_create_azure_any_accountstoragetype(admin_pc, admin_cc,
remove_resource):
"""Tests that a PVC referencing a storage class with any non-empty skuName or
storageaccounttype field successfully creates
"""
cc_client = admin_cc.client
pc_client = admin_pc.client
ns = cc_client.create_namespace(
name="ns" + random_str(),
projectId=admin_pc.project.id)
remove_resource(ns)
# try with storageaccounttype value
sc1 = cc_client.create_storage_class(
name="sc" + random_str(),
provisioner="kubernetes.io/azure-disk",
kind="shared",
parameters={
"storageaccounttype": "asdf",
},
)
remove_resource(sc1)
pvc1 = pc_client.create_persistent_volume_claim(
name="pc" + random_str(),
storageClassId=sc1.name,
namespaceId=ns.id,
accessModes=["ReadWriteOnce"],
resources={
"requests": {
"storage": "30Gi"
}
})
remove_resource(pvc1)
# try with skuName value
sc2 = cc_client.create_storage_class(
name="sc" + random_str(),
provisioner="kubernetes.io/azure-disk",
parameters={
"skuName": "asdf",
},
)
remove_resource(sc2)
pvc2 = pc_client.create_persistent_volume_claim(
name="pc" + random_str(),
storageClassId=sc2.name,
namespaceId=ns.id,
accessModes=["ReadWriteOnce"],
resources={
"requests": {
"storage": "30Gi"
}
})
remove_resource(pvc2)
def test_can_create_pvc_no_storage_no_vol(admin_pc, remove_resource):
"""Tests that a PVC referencing no storage class and no volume
can be created
"""
ns = admin_pc.cluster.client.create_namespace(
name="ns" + random_str(),
projectId=admin_pc.project.id)
remove_resource(ns)
pvc = admin_pc.client.create_persistent_volume_claim(
name="pc" + random_str(),
namespaceId=ns.id,
accessModes=["ReadWriteOnce"],
resources={
"requests": {
"storage": "30Gi"
}
})
remove_resource(pvc)
assert pvc is not None
assert pvc.state == "pending"
| 3968 | 29.530769 | 81 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_global_roles.py
|
import pytest
from rancher import ApiError
from .common import random_str
from .conftest import wait_for
@pytest.mark.nonparallel
def test_builtin_default_can_be_edited(admin_mc, revert_gr):
"""Asserts admins can only edit a builtin global role's newUserDefault
field"""
admin_client = admin_mc.client
gr = admin_client.by_id_global_role(id="admin")
revert_gr(gr)
assert gr.builtin is True
assert "remove" not in gr.links.keys()
assert gr.newUserDefault is False
new_gr = admin_client.update_by_id_global_role(id=gr.id,
displayName="gr-test",
description="asdf",
rules=None,
newUserDefault=True,
builtin=True)
assert new_gr.name == gr.name
assert new_gr.get("description") == gr.description
assert new_gr.rules is not None
assert new_gr.get("builtin") is True
    # newUserDefault is the only field that should be editable
# for a builtin role
assert new_gr.newUserDefault is True
def test_only_admin_can_crud_global_roles(admin_mc, user_mc, remove_resource):
"""Asserts that only admins can create, get, update, and delete
non-builtin global roles"""
admin_client = admin_mc.client
user_client = user_mc.client
gr = admin_client.create_global_role(name="gr-" + random_str())
remove_resource(gr)
gr.annotations = {"test": "asdf"}
def try_gr_update():
try:
return admin_client.update_by_id_global_role(
id=gr.id,
value=gr)
except ApiError as e:
assert e.error.status == 404
return False
wait_for(try_gr_update)
gr_list = admin_client.list_global_role()
assert len(gr_list.data) > 0
admin_client.delete(gr)
with pytest.raises(ApiError) as e:
gr2 = user_client.create_global_role(name="gr2-" + random_str())
remove_resource(gr2)
assert e.value.error.status == 403
gr3 = admin_client.create_global_role(name="gr3-" + random_str())
remove_resource(gr3)
with pytest.raises(ApiError) as e:
user_client.by_id_global_role(id=gr3.id)
gr3.annotations = {"test2": "jkl"}
def try_gr_unauth():
with pytest.raises(ApiError) as e:
user_client.update_by_id_global_role(id=gr3.id, value=gr3)
if e.value.error.status == 404:
return False
assert e.value.error.status == 403
return True
wait_for(try_gr_unauth)
gr_list = user_client.list_global_role()
assert len(gr_list.data) == 0
with pytest.raises(ApiError) as e:
user_client.delete(gr3)
assert e.value.error.status == 403
def test_admin_can_only_edit_builtin_global_roles(admin_mc, remove_resource):
"""Asserts admins can edit builtin global roles created by rancher but
cannot delete them"""
admin_client = admin_mc.client
gr = admin_client.by_id_global_role(id="admin")
assert gr.builtin is True
assert "remove" not in gr.links.keys()
gr2 = admin_client.create_global_role(name="gr2-" + random_str(),
builtin=True)
remove_resource(gr2)
# assert that builtin cannot be set by admin and is false
assert gr2.builtin is False
admin_client.update_by_id_global_role(id=gr.id)
with pytest.raises(ApiError) as e:
admin_client.delete(gr)
assert e.value.error.status == 403
assert "cannot delete builtin global roles" in e.value.error.message
@pytest.fixture
def revert_gr(admin_mc, request):
"""Ensures gr was reverted to previous state, regardless of test results
"""
def _cleanup(old_gr):
def revert():
reverted_gr = admin_mc.client.update_by_id_global_role(
id=old_gr.id,
displayName=old_gr.name,
description=old_gr.description,
rules=old_gr.rules,
newUserDefault=old_gr.newUserDefault,
builtin=old_gr.builtin)
assert reverted_gr.name == old_gr.name
assert reverted_gr.get("description") == old_gr.description
assert reverted_gr.rules[0].data_dict() == old_gr.rules[0].\
data_dict()
assert reverted_gr.get("builtin") is old_gr.builtin
assert reverted_gr.newUserDefault is old_gr.newUserDefault
request.addfinalizer(revert)
return _cleanup
| 4610 | 31.935714 | 78 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_multi_cluster_app.py
|
from .common import random_str, check_subject_in_rb
from rancher import ApiError
from .conftest import (
wait_until, wait_for, set_server_version, wait_until_available,
user_project_client
)
import time
import pytest
import kubernetes
roles_resource = 'roles'
projects_resource = 'projects'
members_resource = 'members'
def test_multiclusterapp_create_no_roles(admin_mc, admin_pc, remove_resource):
client = admin_mc.client
mcapp_name = random_str()
temp_ver = "cattle-global-data:library-wordpress-1.0.5"
targets = [{"projectId": admin_pc.project.id}]
# should not be able to create without passing roles
    with pytest.raises(ApiError) as e:
        mcapp = client.create_multi_cluster_app(name=mcapp_name,
                                                templateVersionId=temp_ver,
                                                targets=targets)
        remove_resource(mcapp)
    assert e.value.error.status == 422
def test_multiclusterapp_invalid_project(admin_mc, remove_resource):
    client = admin_mc.client
    mcapp_name = random_str()
    temp_ver = "cattle-global-data:library-wordpress-1.0.5"
    targets = [{"projectId": "abc:def"}]
    with pytest.raises(ApiError) as e:
        mcapp = client.create_multi_cluster_app(name=mcapp_name,
                                                templateVersionId=temp_ver,
                                                targets=targets)
        remove_resource(mcapp)
    assert e.value.error.status == 422
@pytest.mark.nonparallel
def test_multiclusterapp_create_with_members(admin_mc, admin_pc,
user_factory, remove_resource,
):
client = admin_mc.client
mcapp_name = random_str()
temp_ver = "cattle-global-data:library-wordpress-1.0.5"
targets = [{"projectId": admin_pc.project.id}]
user_member = user_factory()
remove_resource(user_member)
user_not_member = user_factory()
remove_resource(user_not_member)
members = [{"userPrincipalId": "local://"+user_member.user.id,
"accessType": "read-only"}]
roles = ["cluster-owner", "project-member"]
mcapp1 = client.create_multi_cluster_app(name=mcapp_name,
templateVersionId=temp_ver,
targets=targets,
members=members,
roles=roles)
remove_resource(mcapp1)
wait_for_app(admin_pc, mcapp_name, 60)
# check who has access to the multiclusterapp
# admin and user_member should be able to list it
id = "cattle-global-data:" + mcapp_name
mcapp = client.by_id_multi_cluster_app(id)
assert mcapp is not None
um_client = user_member.client
mcapp = um_client.by_id_multi_cluster_app(id)
assert mcapp is not None
# member should also get access to the mcapp revision
if mcapp['status']['revisionId'] != '':
mcapp_revision_id = "cattle-global-data:" + \
mcapp['status']['revisionId']
mcr = um_client.\
by_id_multi_cluster_app_revision(mcapp_revision_id)
assert mcr is not None
# user who's not a member shouldn't get access
unm_client = user_not_member.client
    with pytest.raises(ApiError) as e:
        unm_client.by_id_multi_cluster_app(id)
    assert e.value.error.status == 403
# add the special char * to indicate sharing of resource with all
# authenticated users
new_members = [{"userPrincipalId": "local://"+user_member.user.id,
"accessType": "read-only"}, {"groupPrincipalId": "*"}]
client.update(mcapp, members=new_members, roles=roles)
# now user_not_member should be able to access this mcapp without
# being explicitly added
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
split = mcapp.id.split(":")
name = split[1]
rb_name = name + "-m-r"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
'system:authenticated', rb_name),
timeout=60, fail_handler=lambda:
'failed to check updated rolebinding')
mcapp = user_not_member.client.by_id_multi_cluster_app(id)
assert mcapp is not None
# even newly created users should be able to access this mcapp
new_user = user_factory()
remove_resource(new_user)
mcapp = new_user.client.by_id_multi_cluster_app(id)
assert mcapp is not None
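# Sharing via groupPrincipalId "*" is realized as a rolebinding on the
# built-in authenticated group, i.e. roughly this shape (a sketch of
# what check_subject_in_rb verifies, not the controller's code):
#
#   subjects:
#   - kind: Group
#     name: system:authenticated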
def test_multiclusterapp_admin_create(admin_mc, admin_pc, remove_resource):
client = admin_mc.client
mcapp_name = random_str()
temp_ver = "cattle-global-data:library-wordpress-1.0.5"
targets = [{"projectId": admin_pc.project.id}]
roles = ["cluster-owner", "project-member"]
# roles check should be relaxed for admin
mcapp1 = client.create_multi_cluster_app(name=mcapp_name,
templateVersionId=temp_ver,
targets=targets,
roles=roles)
remove_resource(mcapp1)
wait_for_app(admin_pc, mcapp_name, 60)
def test_multiclusterapp_cluster_owner_create(admin_mc, admin_pc,
remove_resource, user_factory):
client = admin_mc.client
mcapp_name = random_str()
cowner = user_factory()
crtb_owner = client.create_cluster_role_template_binding(
clusterId="local",
roleTemplateId="cluster-owner",
userId=cowner.user.id)
remove_resource(crtb_owner)
wait_until(rtb_cb(client, crtb_owner))
temp_ver = "cattle-global-data:library-wordpress-1.0.5"
targets = [{"projectId": admin_pc.project.id}]
roles = ["cluster-owner", "project-member"]
# user isn't explicitly added as project-member, but this check should be
# relaxed since user is added as cluster-owner
mcapp1 = cowner.client.\
create_multi_cluster_app(name=mcapp_name,
templateVersionId=temp_ver,
targets=targets,
roles=roles)
remove_resource(mcapp1)
def test_multiclusterapp_project_owner_create(admin_mc, admin_pc,
remove_resource, user_factory):
client = admin_mc.client
mcapp_name = random_str()
powner = user_factory()
prtb_owner = client.create_project_role_template_binding(
projectId=admin_pc.project.id,
roleTemplateId="project-owner",
userId=powner.user.id)
remove_resource(prtb_owner)
wait_until(rtb_cb(client, prtb_owner))
temp_ver = "cattle-global-data:library-wordpress-1.0.5"
targets = [{"projectId": admin_pc.project.id}]
roles = ["project-member"]
# user isn't explicitly added as project-member, but this check should be
# relaxed since user is added as project-owner
mcapp1 = powner.client.\
create_multi_cluster_app(name=mcapp_name,
templateVersionId=temp_ver,
targets=targets,
roles=roles)
remove_resource(mcapp1)
def test_multiclusterapp_user_create(admin_mc, admin_pc, remove_resource,
user_factory):
client = admin_mc.client
mcapp_name = random_str()
temp_ver = "cattle-global-data:library-wordpress-1.0.5"
targets = [{"projectId": admin_pc.project.id}]
# make regular user cluster-owner and project-owner in the cluster and
# it's project
user = user_factory()
remove_resource(user)
user_client = user.client
crtb_owner = client.create_cluster_role_template_binding(
clusterId="local",
roleTemplateId="cluster-owner",
userId=user.user.id)
remove_resource(crtb_owner)
wait_until(rtb_cb(client, crtb_owner))
prtb_member = client.create_project_role_template_binding(
projectId=admin_pc.project.id,
roleTemplateId="project-member",
userId=user.user.id)
remove_resource(prtb_member)
wait_until(rtb_cb(client, prtb_member))
roles = ["cluster-owner", "project-member"]
mcapp1 = user_client.create_multi_cluster_app(name=mcapp_name,
templateVersionId=temp_ver,
targets=targets,
roles=roles)
remove_resource(mcapp1)
wait_for_app(admin_pc, mcapp_name, 60)
# try creating as a user who is not cluster-owner,
# but that is one of the roles listed, must fail
user_no_roles = user_factory()
remove_resource(user_no_roles)
# add user to project as member but not to cluster as owner
prtb_member = client.create_project_role_template_binding(
projectId=admin_pc.project.id,
roleTemplateId="project-member",
userId=user_no_roles.user.id)
remove_resource(prtb_member)
wait_until(rtb_cb(client, prtb_member))
    with pytest.raises(ApiError) as e:
        user_no_roles.client.\
            create_multi_cluster_app(name=random_str(),
                                     templateVersionId=temp_ver,
                                     targets=targets,
                                     roles=roles)
    assert e.value.error.status == 403
    assert "does not have roles cluster-owner in cluster"\
        in e.value.error.message
    assert "cluster-owner" in e.value.error.message
def test_multiclusterapp_admin_update_roles(admin_mc, admin_pc,
remove_resource):
client = admin_mc.client
mcapp_name = random_str()
temp_ver = "cattle-global-data:library-wordpress-1.0.5"
targets = [{"projectId": admin_pc.project.id}]
roles = ["project-member"]
mcapp1 = client.create_multi_cluster_app(name=mcapp_name,
templateVersionId=temp_ver,
targets=targets,
roles=roles)
remove_resource(mcapp1)
wait_for_app(admin_pc, mcapp_name, 60)
# admin doesn't get cluster/project roles (crtb/prtb) by default
# but updating the mcapp to add these roles must pass, since global admin
# should have access to everything and must be excused
new_roles = ["cluster-owner", "project-member"]
client.update(mcapp1, roles=new_roles)
wait_for(lambda: check_updated_roles(admin_mc, mcapp_name, new_roles),
timeout=60, fail_handler=fail_handler(roles_resource))
def test_multiclusterapp_user_update_roles(admin_mc, admin_pc, remove_resource,
user_factory):
client = admin_mc.client
mcapp_name = random_str()
temp_ver = "cattle-global-data:library-wordpress-1.0.5"
targets = [{"projectId": admin_pc.project.id}]
# create mcapp as admin, passing "cluster-owner" role
roles = ["cluster-owner"]
# add a user as a member with access-type owner
user = user_factory()
remove_resource(user)
members = [{"userPrincipalId": "local://" + user.user.id,
"accessType": "owner"}]
mcapp1 = client.create_multi_cluster_app(name=mcapp_name,
templateVersionId=temp_ver,
targets=targets,
roles=roles,
members=members)
remove_resource(mcapp1)
wait_for_app(admin_pc, mcapp_name, 60)
# user wants to update roles to add project-member role
# but user is not a part of target project, so this must fail
new_roles = ["cluster-owner", "project-member"]
    with pytest.raises(ApiError) as e:
        user.client.update(mcapp1, roles=new_roles)
    assert e.value.error.status == 403
    assert "does not have roles project-member in project" \
        in e.value.error.message
    assert "of cluster local" in e.value.error.message
# now admin adds this user to project as project-member
prtb_member = client.create_project_role_template_binding(
projectId=admin_pc.project.id,
roleTemplateId="project-member",
userId=user.user.id)
remove_resource(prtb_member)
wait_until(rtb_cb(client, prtb_member))
# now user should be able to add project-member role
user.client.update(mcapp1, roles=new_roles)
wait_for(lambda: check_updated_roles(admin_mc, mcapp_name, new_roles),
timeout=60, fail_handler=fail_handler(roles_resource))
def test_admin_access(admin_mc, admin_pc, user_factory, remove_resource):
client = admin_mc.client
mcapp_name = random_str()
temp_ver = "cattle-global-data:library-wordpress-1.0.5"
targets = [{"projectId": admin_pc.project.id}]
user = user_factory()
remove_resource(user)
prtb_member = client.create_project_role_template_binding(
projectId=admin_pc.project.id,
roleTemplateId="project-member",
userId=user.user.id)
remove_resource(prtb_member)
wait_until(rtb_cb(client, prtb_member))
mcapp1 = user.client.\
create_multi_cluster_app(name=mcapp_name,
templateVersionId=temp_ver,
targets=targets,
roles=["project-member"])
wait_for_app(admin_pc, mcapp_name, 60)
client.update(mcapp1, roles=["cluster-owner"])
wait_for(lambda: check_updated_roles(admin_mc, mcapp_name,
["cluster-owner"]), timeout=60,
fail_handler=fail_handler(roles_resource))
def test_add_projects(admin_mc, admin_pc, admin_cc, remove_resource):
client = admin_mc.client
mcapp_name = random_str()
temp_ver = "cattle-global-data:library-wordpress-1.0.5"
targets = [{"projectId": admin_pc.project.id}]
mcapp1 = client.\
create_multi_cluster_app(name=mcapp_name,
templateVersionId=temp_ver,
targets=targets,
roles=["project-member"])
remove_resource(mcapp1)
wait_for_app(admin_pc, mcapp_name, 60)
p = client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id)
remove_resource(p)
p = admin_cc.management.client.wait_success(p)
client.action(obj=mcapp1, action_name="addProjects",
projects=[p.id])
new_projects = [admin_pc.project.id, p.id]
wait_for(lambda: check_updated_projects(admin_mc, mcapp_name,
new_projects), timeout=60,
fail_handler=fail_handler(projects_resource))
def test_remove_projects(admin_mc, admin_pc, admin_cc, remove_resource):
client = admin_mc.client
mcapp_name = random_str()
temp_ver = "cattle-global-data:library-wordpress-1.0.5"
p = client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id)
remove_resource(p)
p = admin_cc.management.client.wait_success(p)
targets = [{"projectId": admin_pc.project.id}, {"projectId": p.id}]
mcapp1 = client. \
create_multi_cluster_app(name=mcapp_name,
templateVersionId=temp_ver,
targets=targets,
roles=["project-member"])
remove_resource(mcapp1)
wait_for_app(admin_pc, mcapp_name, 60)
client.action(obj=mcapp1, action_name="removeProjects", projects=[p.id])
new_projects = [admin_pc.project.id]
wait_for(lambda: check_updated_projects(admin_mc, mcapp_name,
new_projects), timeout=60,
fail_handler=fail_handler(projects_resource))
def test_multiclusterapp_revision_access(admin_mc, admin_pc, remove_resource,
user_factory):
client = admin_mc.client
mcapp_name = random_str()
temp_ver = "cattle-global-data:library-mysql-0.3.8"
targets = [{"projectId": admin_pc.project.id}]
user = user_factory()
remove_resource(user)
user_client = user.client
# assign user to local cluster as project-member
prtb_member = client.create_project_role_template_binding(
projectId=admin_pc.project.id,
roleTemplateId="project-member",
userId=user.user.id)
remove_resource(prtb_member)
wait_until(rtb_cb(client, prtb_member))
roles = ["project-member"]
mcapp1 = user_client.create_multi_cluster_app(name=mcapp_name,
templateVersionId=temp_ver,
targets=targets,
roles=roles)
remove_resource(mcapp1)
wait_for_app(admin_pc, mcapp_name, 60)
mcapp_revisions = user_client.list_multi_cluster_app_revision()
assert len(mcapp_revisions) == 1
@pytest.mark.skip(reason='flaky test maybe, skipping for now')
def test_app_upgrade_mcapp_roles_change(admin_mc, admin_pc,
remove_resource):
client = admin_mc.client
mcapp_name = random_str()
temp_ver = "cattle-global-data:library-grafana-0.0.31"
targets = [{"projectId": admin_pc.project.id}]
roles = ["project-member"]
mcapp1 = client.create_multi_cluster_app(name=mcapp_name,
templateVersionId=temp_ver,
targets=targets,
roles=roles)
remove_resource(mcapp1)
wait_for_app(admin_pc, mcapp_name, 60)
# changing roles should trigger app upgrade
roles = ["cluster-owner"]
client.update(mcapp1, roles=roles)
wait_for_app_condition(admin_pc, mcapp_name, 'UserTriggeredAction', 60)
def wait_for_app_condition(admin_pc, name, condition, timeout=60):
start = time.time()
interval = 0.5
client = admin_pc.client
cluster_id, project_id = admin_pc.project.id.split(':')
app_name = name+"-"+project_id
found = False
while not found:
if time.time() - start > timeout:
raise Exception('Timeout waiting for app of multiclusterapp')
apps = client.list_app(name=app_name)
if len(apps) > 0:
conditions = apps['data'][0]['conditions']
for c in conditions:
if c['type'] == condition and\
c['status'] == 'True':
found = True
time.sleep(interval)
interval *= 2
@pytest.mark.nonparallel
def test_mcapp_create_validation(admin_mc, admin_pc, custom_catalog,
remove_resource, restore_rancher_version):
"""Test create validation of multi cluster apps. This test will set the
rancher version explicitly and attempt to create apps with rancher version
requirements
"""
# 1.6.0 uses 2.0.0-2.2.0
# 1.6.2 uses 2.1.0-2.3.0
c_name = random_str()
custom_catalog(name=c_name)
client = admin_mc.client
set_server_version(client, "2.0.0")
cat_ns_name = "cattle-global-data:"+c_name
mcapp_data = {
'name': random_str(),
'templateVersionId': cat_ns_name+"-chartmuseum-1.6.2",
'targets': [{"projectId": admin_pc.project.id}],
'roles': ["cluster-owner", "project-member"],
}
# First app requires a min rancher version of 2.1 so we expect an error
with pytest.raises(ApiError) as e:
mcapp1 = client.create_multi_cluster_app(mcapp_data)
remove_resource(mcapp1)
assert e.value.error.status == 422
assert e.value.error.message == 'rancher min version not met'
# Second app requires a min of 2.0 so no error should be returned
mcapp_data['name'] = random_str()
    mcapp_data['templateVersionId'] = cat_ns_name+"-chartmuseum-1.6.0"
mcapp2 = client.create_multi_cluster_app(mcapp_data)
remove_resource(mcapp2)
wait_for_app(admin_pc, mcapp_data['name'])
set_server_version(client, "2.2.1")
# Third app requires a max of version 2.2.0 so expect error
with pytest.raises(ApiError) as e:
mcapp_data['name'] = random_str()
mcapp3 = client.create_multi_cluster_app(mcapp_data)
remove_resource(mcapp3)
assert e.value.error.status == 422
assert e.value.error.message == 'rancher max version exceeded'
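# The server-side checks exercised above amount to a version-range test
# against the chart's rancher min/max constraints; a minimal sketch of
# the idea (an illustration, not rancher's actual implementation):
def _version_in_range(server, minimum, maximum):
    def parse(v):
        return tuple(int(p) for p in v.split('.'))
    return parse(minimum) <= parse(server) <= parse(maximum)
# e.g. _version_in_range("2.0.0", "2.1.0", "2.3.0") is False, which is
# why chartmuseum-1.6.2 is rejected on a 2.0.0 server.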
@pytest.mark.nonparallel
def test_mcapp_update_validation(admin_mc, admin_pc, custom_catalog,
remove_resource, restore_rancher_version):
"""Test update validation of multi cluster apps. This test will set the
rancher version explicitly and attempt to update an app with rancher
version requirements
"""
# 1.6.0 uses 2.0.0-2.2.0
# 1.6.2 uses 2.1.0-2.3.0
c_name = random_str()
custom_catalog(name=c_name)
client = admin_mc.client
set_server_version(client, "2.0.0")
cat_ns_name = "cattle-global-data:"+c_name
mcapp_data = {
'name': random_str(),
'templateVersionId': cat_ns_name+"-chartmuseum-1.6.0",
'targets': [{"projectId": admin_pc.project.id}],
'roles': ["cluster-owner", "project-member"],
}
# First app requires a min rancher version of 2.0 so no error
mcapp1 = client.create_multi_cluster_app(mcapp_data)
remove_resource(mcapp1)
wait_for_app(admin_pc, mcapp_data['name'])
# App upgrade requires a min of 2.1 so expect error
with pytest.raises(ApiError) as e:
mcapp1 = client.update_by_id_multi_cluster_app(
id=mcapp1.id, templateVersionId=cat_ns_name+"-chartmuseum-1.6.2")
assert e.value.error.status == 422
assert e.value.error.message == 'rancher min version not met'
set_server_version(client, "2.3.1")
# App upgrade requires a max of 2.3 so expect error
with pytest.raises(ApiError) as e:
mcapp1 = client.update_by_id_multi_cluster_app(
id=mcapp1.id, templateVersionId=cat_ns_name+"-chartmuseum-1.6.2")
assert e.value.error.status == 422
assert e.value.error.message == 'rancher max version exceeded'
@pytest.mark.nonparallel
def test_mcapp_rollback_validation(admin_mc, admin_pc, custom_catalog,
remove_resource, restore_rancher_version):
"""Test rollback validation of multi cluster apps. This test will set the
rancher version explicitly and attempt to rollback an app with rancher
version requirements
"""
# 1.6.0 uses 2.0.0-2.2.0
# 1.6.2 uses 2.1.0-2.3.0
c_name = random_str()
custom_catalog(name=c_name)
client = admin_mc.client
set_server_version(client, "2.1.0")
cat_ns_name = "cattle-global-data:"+c_name
mcapp_data = {
'name': random_str(),
'templateVersionId': cat_ns_name+"-chartmuseum-1.6.0",
'targets': [{"projectId": admin_pc.project.id}],
'roles': ["cluster-owner", "project-member"],
"answers": [{
"type": "answer",
"clusterId": None,
"projectId": None,
"values": {
"defaultImage": "true",
"image.repository": "chartmuseum/chartmuseum",
"image.tag": "v0.7.1",
"env.open.STORAGE": "local",
"gcp.secret.enabled": "false",
"gcp.secret.key": "credentials.json",
"persistence.enabled": "true",
"persistence.size": "10Gi",
"ingress.enabled": "true",
"ingress.hosts[0]": "xip.io",
"service.type": "NodePort",
"env.open.SHOW_ADVANCED": "false",
"env.open.DEPTH": "0",
"env.open.ALLOW_OVERWRITE": "false",
"env.open.AUTH_ANONYMOUS_GET": "false",
"env.open.DISABLE_METRICS": "true"
}
}]
}
# First app requires a min rancher version of 2.0 so no error
mcapp1 = client.create_multi_cluster_app(mcapp_data)
remove_resource(mcapp1)
wait_for_app(admin_pc, mcapp_data['name'])
mcapp1 = client.reload(mcapp1)
original_rev = mcapp1.revisions().data[0].name
mcapp1.templateVersionId = cat_ns_name+"-chartmuseum-1.6.2"
# Upgrade the app to get a rollback version
mcapp1 = client.update_by_id_multi_cluster_app(mcapp1.id, mcapp1)
wait_for_app(admin_pc, mcapp_data['name'])
mcapp1 = client.reload(mcapp1)
assert mcapp1.status.revisionId != original_rev, 'app did not upgrade'
set_server_version(client, "2.3.1")
# App rollback requires a max of 2.2 so expect error
with pytest.raises(ApiError) as e:
client.action(obj=mcapp1, action_name='rollback',
revisionId=original_rev)
assert e.value.error.status == 422
assert e.value.error.message == 'rancher max version exceeded'
def test_perform_mca_action_read_only(admin_mc, admin_pc, remove_resource,
user_mc, user_factory):
"""Tests MCA actions with a read-only user and a member user."""
client = admin_mc.client
project = admin_pc.project
user = user_mc
user_member = user_factory()
ns = admin_pc.cluster.client.create_namespace(
name=random_str(),
projectId=project.id)
remove_resource(ns)
# Create a read-only user binding.
prtb1 = admin_mc.client.create_project_role_template_binding(
name="prtb-" + random_str(),
userId=user.user.id,
projectId=project.id,
roleTemplateId="read-only")
remove_resource(prtb1)
wait_until_available(user.client, project)
# Then, create a member user binding.
prtb2 = admin_mc.client.create_project_role_template_binding(
name="prtb-" + random_str(),
userId=user_member.user.id,
projectId=project.id,
roleTemplateId="project-member")
remove_resource(prtb2)
wait_until_available(user_member.client, project)
user_pc = user_project_client(user, project)
user_member_pc = user_project_client(user_member, project)
# Admin user creates the MCA and specifically adds both users. The
# project-member user should have permissions by default since their role
# is specified in the MCA creation.
mcapp_name = random_str()
mcapp_user_read_only = "local://" + user.user.id
mcapp_user_member = "local://" + user_member.user.id
mcapp = client.create_multi_cluster_app(
name=mcapp_name,
templateVersionId="cattle-global-data:library-docker-registry-1.9.2",
targets=[{"projectId": admin_pc.project.id}],
members=[{"userPrincipalId": mcapp_user_read_only,
"accessType": "read-only"},
{"userPrincipalId": mcapp_user_member,
"accessType": "member"}],
roles=["cluster-owner", "project-member"])
remove_resource(mcapp)
wait_for_app(admin_pc, mcapp_name)
# Admin user updates the MCA to yield a rollback option. We change the
# image version below.
mcapp = client.reload(mcapp)
original_rev = mcapp.revisions().data[0].name
mcapp.templateVersionId = (
"cattle-global-data:library-docker-registry-1.8.1")
mcapp = client.update_by_id_multi_cluster_app(mcapp.id, mcapp)
wait_for_app(admin_pc, mcapp_name)
mcapp = client.reload(mcapp)
# Read-only users should receive a 404 error.
with pytest.raises(ApiError) as e:
user_pc.action(obj=mcapp, action_name="rollback",
revisionId=original_rev)
assert e.value.error.status == 404
# Member users will be able to perform the rollback.
user_member_pc.action(obj=mcapp, action_name="rollback",
revisionId=original_rev)
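
# Deployed child apps are named "<mcapp name>-<project short id>";
# wait_for_app polls the target project until that app shows up.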
def wait_for_app(admin_pc, name, timeout=60):
start = time.time()
interval = 0.5
client = admin_pc.client
project_id = admin_pc.project.id.split(':')[1]
app_name = name+"-"+project_id
found = False
while not found:
if time.time() - start > timeout:
raise Exception('Timeout waiting for app of multiclusterapp')
apps = client.list_app(name=app_name)
if len(apps) > 0:
found = True
time.sleep(interval)
interval *= 2
def rtb_cb(client, rtb):
"""Wait for the prtb to have the userId populated"""
def cb():
rt = client.reload(rtb)
return rt.userPrincipalId is not None
return cb
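
# The checkers below are intended to be polled (e.g. via wait_for) until the
# multiclusterapp's stored targets/roles reflect a pending update.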
def check_updated_projects(admin_mc, mcapp_name, projects):
mcapp_projects = []
id = "cattle-global-data:" + mcapp_name
mcapp = admin_mc.client.by_id_multi_cluster_app(id)
for t in mcapp.targets:
mcapp_projects.append(t.projectId)
if mcapp_projects == projects:
return True
return False
def check_updated_roles(admin_mc, mcapp_name, roles):
id = "cattle-global-data:" + mcapp_name
mcapp = admin_mc.client.by_id_multi_cluster_app(id)
if mcapp is not None and mcapp.roles == roles:
return True
return False
def fail_handler(resource):
return "failed waiting for multiclusterapp " + resource + " to get updated"
| 29,292 | 38.746269 | 79 | py | rancher | rancher-master/tests/integration/suite/test_cluster_auth_tokens.py |
import subprocess
import pytest
from .conftest import create_kubeconfig, wait_for
from sys import platform
from kubernetes.client import CustomObjectsApi
from rancher import ApiError
# test if the kubeconfig works to list api-resources for the fqdn context
def exec_kubectl(request, dind_cc, client, cmd='api-resources'):
cluster_kubeconfig_file = create_kubeconfig(request, dind_cc, client)
# verify cluster scoped access
try:
return subprocess.check_output(
'kubectl ' + cmd +
' --kubeconfig ' + cluster_kubeconfig_file +
' --context ' + dind_cc.name + '-fqdn',
stderr=subprocess.STDOUT, shell=True,
)
except subprocess.CalledProcessError as err:
print('kubectl error: ' + str(err.output))
raise err
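
# The assembled command looks roughly like this (paths illustrative):
#   kubectl api-resources --kubeconfig /tmp/kubeconfig-xyz --context <cluster>-fqdn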
# test generator for multiple attempts
def kubectl_available(request, dind_cc, client):
def test():
try:
exec_kubectl(request, dind_cc, client)
return True
except subprocess.CalledProcessError:
return False
return test
# as an admin, we should have access
@pytest.mark.skip(reason='cluster testing needs refactor')
@pytest.mark.skipif(platform != 'linux', reason='requires linux for dind')
@pytest.mark.nonparallel
def test_admin_api_resources(request, dind_cc):
wait_for(kubectl_available(request, dind_cc, dind_cc.admin_mc.client))
# as a user which has not been given permission, we should fail
@pytest.mark.skip(reason='cluster testing needs refactor')
@pytest.mark.skipif(platform != 'linux', reason='requires linux for dind')
@pytest.mark.nonparallel
def test_user_no_template(request, dind_cc, user_mc):
test_admin_api_resources(request, dind_cc)
with pytest.raises(ApiError) as e:
exec_kubectl(request, dind_cc, user_mc.client)
assert e.value.error.status == 403, 'user should not have permission'
# as a user that is a cluster member, we should have access
@pytest.mark.skip(reason='cluster testing needs refactor')
@pytest.mark.skipif(platform != 'linux', reason='requires linux for dind')
@pytest.mark.nonparallel
def test_user_with_template(request, dind_cc, user_mc):
test_user_no_template(request, dind_cc, user_mc)
role_template = {
'clusterId': dind_cc.cluster.id,
'userPrincipalId': 'local://' + user_mc.user.id,
'roleTemplateId': 'cluster-member'
}
dind_cc.admin_mc.client.create_clusterRoleTemplateBinding(role_template)
wait_for(kubectl_available(request, dind_cc, user_mc.client))
# as a user that is part of a group that has access, we should have access
@pytest.mark.skip(reason='cluster testing needs refactor')
@pytest.mark.skipif(platform != 'linux', reason='requires linux for dind')
@pytest.mark.nonparallel
def test_user_group_with_template(request, dind_cc, user_mc):
test_user_no_template(request, dind_cc, user_mc)
crdClient = CustomObjectsApi(dind_cc.admin_mc.k8s_client)
user_attribute = crdClient.get_cluster_custom_object(
'management.cattle.io',
'v3',
'userattributes',
user_mc.user.id
)
user_attribute['GroupPrincipals']['local']['Items'] = [{
'metadata': {
'name': 'local_group://test-123'
}
}]
crdClient.replace_cluster_custom_object(
'management.cattle.io',
'v3',
'userattributes',
user_mc.user.id,
user_attribute
)
role_template = {
'clusterId': dind_cc.cluster.id,
'groupPrincipalId': 'local_group://test-123',
'roleTemplateId': 'cluster-member'
}
dind_cc.admin_mc.client.create_clusterRoleTemplateBinding(role_template)
wait_for(kubectl_available(request, dind_cc, user_mc.client))
| 3,736 | 35.637255 | 76 | py | rancher | rancher-master/tests/integration/suite/test_project_role_template_bindings.py |
import pytest
from .common import random_str
from .conftest import wait_for
from rancher import ApiError
def test_cannot_target_users_and_group(admin_mc, remove_resource):
"""Asserts that a projectroletemplatebinding cannot target both
user and group subjects"""
admin_client = admin_mc.client
project = admin_client.create_project(
name="p-" + random_str(),
clusterId="local")
remove_resource(project)
with pytest.raises(ApiError) as e:
prtb = admin_client.create_project_role_template_binding(
name="prtb-"+random_str(),
projectId=project.id,
userId=admin_mc.user.id,
groupPrincipalId="someauthprovidergroupid",
roleTemplateId="projectcatalogs-view")
remove_resource(prtb)
assert e.value.error.status == 422
assert "must target a user [userId]/[userPrincipalId] OR a group " \
"[groupId]/[groupPrincipalId]" in e.value.error.message
def test_must_have_target(admin_mc, admin_pc, remove_resource):
"""Asserts that a projectroletemplatebinding must have a subject"""
admin_client = admin_mc.client
with pytest.raises(ApiError) as e:
prtb = admin_client.create_project_role_template_binding(
name="prtb-" + random_str(),
projectId=admin_pc.project.id,
roleTemplateId="projectcatalogs-view")
remove_resource(prtb)
assert e.value.error.status == 422
assert "must target a user [userId]/[userPrincipalId] OR a group " \
"[groupId]/[groupPrincipalId]" in e.value.error.message
def test_cannot_update_subject_or_proj(admin_mc, admin_pc, remove_resource):
"""Asserts non-metadata fields cannot be updated"""
admin_client = admin_mc.client
old_prtb = admin_client.create_project_role_template_binding(
name="prtb-" + random_str(),
projectId=admin_pc.project.id,
userId=admin_mc.user.id,
roleTemplateId="projectcatalogs-view")
remove_resource(old_prtb)
wait_for(lambda: admin_client.reload(old_prtb).userPrincipalId is not None)
old_prtb = admin_client.reload(old_prtb)
prtb = admin_client.update_by_id_project_role_template_binding(
id=old_prtb.id,
clusterId="fakeproject",
userId="",
userPrincipalId="asdf",
groupPrincipalId="asdf",
group="asdf"
)
assert prtb.get("projectId") == old_prtb.get("projectId")
assert prtb.get("userId") == old_prtb.get("userId")
assert prtb.get("userPrincipalId") == old_prtb.get("userPrincipalId")
assert prtb.get("groupPrincipalId") == old_prtb.get("groupPrincipalId")
assert prtb.get("group") == old_prtb.get("group")
| 2,704 | 35.554054 | 79 | py | rancher | rancher-master/tests/integration/suite/test_dns.py |
from .common import random_str, auth_check
from rancher import ApiError
import pytest
def test_dns_fields(admin_pc_client):
auth_check(admin_pc_client.schema, 'dnsRecord', 'crud', {
'namespaceId': 'cr',
'projectId': 'cr',
'hostname': 'cru',
'allocateLoadBalancerNodePorts': 'cru',
'ipAddresses': 'cru',
'ipFamilies': 'cru',
'ipFamilyPolicy': 'cru',
'clusterIPs': 'cru',
'clusterIp': 'r',
'selector': 'cru',
'targetWorkloadIds': 'cru',
'workloadId': 'r',
'targetDnsRecordIds': 'cru',
'topologyKeys': 'cru',
'publicEndpoints': 'r',
'ports': 'r',
})
def test_dns_hostname(admin_pc, admin_cc_client):
client = admin_pc.client
ns = admin_cc_client.create_namespace(name=random_str(),
projectId=admin_pc.project.id)
name = random_str()
dns_record = client.create_dns_record(name=name,
hostname='target',
namespaceId=ns.id)
assert dns_record.baseType == 'dnsRecord'
assert dns_record.type == 'dnsRecord'
assert dns_record.name == name
assert dns_record.hostname == 'target'
assert "clusterIp" not in dns_record
assert dns_record.namespaceId == ns.id
assert 'namespace' not in dns_record
assert dns_record.projectId == admin_pc.project.id
dns_record = client.update(dns_record, hostname='target2')
dns_record = client.reload(dns_record)
assert dns_record.baseType == 'dnsRecord'
assert dns_record.type == 'dnsRecord'
assert dns_record.name == name
assert dns_record.hostname == 'target2'
assert "clusterIp" not in dns_record
assert dns_record.namespaceId == ns.id
assert 'namespace' not in dns_record
assert dns_record.projectId == admin_pc.project.id
found = False
for i in client.list_dns_record():
if i.id == dns_record.id:
found = True
break
assert found
dns_record = client.by_id_dns_record(dns_record.id)
assert dns_record is not None
client.delete(dns_record)
def test_dns_ips(admin_pc, admin_cc_client):
client = admin_pc.client
ns = admin_cc_client.create_namespace(name=random_str(),
projectId=admin_pc.project.id)
name = random_str()
dns_record = client.create_dns_record(name=name,
ipAddresses=['1.1.1.1',
'2.2.2.2'],
namespaceId=ns.id)
assert dns_record.baseType == 'dnsRecord'
assert dns_record.type == 'dnsRecord'
assert dns_record.name == name
assert 'hostname' not in dns_record
assert dns_record.ipAddresses == ['1.1.1.1', '2.2.2.2']
assert dns_record.clusterIp is None
assert dns_record.namespaceId == ns.id
assert 'namespace' not in dns_record
assert dns_record.projectId == admin_pc.project.id
dns_record = client.update(dns_record, ipAddresses=['1.1.1.2', '2.2.2.1'])
dns_record = client.reload(dns_record)
assert dns_record.baseType == 'dnsRecord'
assert dns_record.type == 'dnsRecord'
assert dns_record.name == name
assert 'hostname' not in dns_record
assert dns_record.ipAddresses == ['1.1.1.2', '2.2.2.1']
assert dns_record.clusterIp is None
assert dns_record.namespaceId == ns.id
assert 'namespace' not in dns_record
assert dns_record.projectId == admin_pc.project.id
dnsname = random_str()
with pytest.raises(ApiError) as e:
client.create_dns_record(name=dnsname,
ipAddresses=['127.0.0.2'],
namespaceId='default')
assert e.value.error.status == 422
found = False
for i in client.list_dns_record():
if i.id == dns_record.id:
found = True
break
assert found
dns_record = client.by_id_dns_record(dns_record.id)
assert dns_record is not None
client.delete(dns_record)
| 4,132 | 32.064 | 78 | py | rancher | rancher-master/tests/integration/suite/test_kontainer_engine_validation.py |
from .common import random_str
from .conftest import wait_until
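
# Shared assertion helper: create a cluster with the given (intentionally
# invalid) EKS config, wait for a 'Provisioned' condition to appear, then
# verify it carries the expected parser error message.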
def assert_has_error_message(admin_mc, remove_resource, eks, message):
cluster = admin_mc.client.create_cluster(
name=random_str(), amazonElasticContainerServiceConfig=eks)
remove_resource(cluster)
def get_provisioned_type(cluster):
for condition in cluster.conditions:
if condition.type == "Provisioned":
if hasattr(condition, 'message'):
return condition.message
return None
def has_provision_status():
new_cluster = admin_mc.client.reload(cluster)
return \
hasattr(new_cluster, "conditions") and \
get_provisioned_type(new_cluster) is not None
def has_error_message():
for condition in cluster.conditions:
if condition.type == "Provisioned":
if getattr(condition, 'message') == message:
return True
return False
wait_until(has_provision_status)
cluster = admin_mc.client.reload(cluster)
wait_until(has_error_message, timeout=120, backoff=False)
cluster = admin_mc.client.reload(cluster)
assert has_error_message(), "no error message %r was present" % \
message
def test_min_nodes_cannot_be_greater_than_max(admin_mc, remove_resource):
eks = {
"accessKey": "not a real access key",
"secretKey": "not a real secret key",
"region": "us-west-2",
"kubernetesVersion": "1.17",
"minimumNodes": 3,
"maximumNodes": 2
}
assert_has_error_message(admin_mc, remove_resource, eks,
"error parsing state: maximum nodes cannot "
"be less than minimum nodes")
def test_min_nodes_cannot_be_zero(admin_mc, remove_resource):
eks = {
"accessKey": "not a real access key",
"secretKey": "not a real secret key",
"region": "us-west-2",
"kubernetesVersion": "1.17",
"minimumNodes": 0,
"maximumNodes": 0
}
assert_has_error_message(admin_mc, remove_resource, eks,
"error parsing state: minimum nodes must be "
"greater than 0")
def test_node_volume_size_cannot_be_zero(admin_mc, remove_resource):
eks = {
"accessKey": "not a real access key",
"secretKey": "not a real secret key",
"region": "us-west-2",
"kubernetesVersion": "1.17",
"minimumNodes": 1,
"maximumNodes": 3,
"nodeVolumeSize": 0
}
assert_has_error_message(admin_mc, remove_resource, eks,
"error parsing state: node volume size must "
"be greater than 0")
def test_private_cluster_requires_vpc_subnets(admin_mc, remove_resource):
eks = {
"accessKey": "not a real access key",
"secretKey": "not a real secret key",
"region": "us-west-2",
"kubernetesVersion": "1.17",
"minimumNodes": 1,
"maximumNodes": 3,
"associateWorkerNodePublicIp": False
}
assert_has_error_message(admin_mc, remove_resource, eks,
"error parsing state: if "
"AssociateWorkerNodePublicIP is set to "
"false a VPC and subnets must also be provided")
| 3,401 | 33.02 | 77 | py | rancher | rancher-master/tests/integration/suite/common.py |
import base64
import hashlib
import random
import time
def random_str():
return 'random-{0}-{1}'.format(random_num(), int(time.time()))
def random_num():
return random.randint(0, 1000000)
def find_one(method, *args, **kw):
return find_count(1, method, *args, **kw)[0]
def find_count(count, method, *args, **kw):
ret = method(*args, **kw)
assert len(ret) == count
return ret
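
# auth_check verifies a schema type's exposed CRUD surface. `access` is a
# string of flags derived from the type's HTTP methods (c=create, r=read,
# u=update, d=delete); `props` maps each resourceField to its expected flags,
# with 'o' marking writeOnly fields.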
def auth_check(schema, id, access, props=None):
schema_type = schema.types[id]
access_actual = set()
try:
if 'GET' in schema_type.collectionMethods:
access_actual.add('r')
except AttributeError:
pass
try:
if 'GET' in schema_type.resourceMethods:
access_actual.add('r')
except AttributeError:
pass
try:
if 'POST' in schema_type.collectionMethods:
access_actual.add('c')
except AttributeError:
pass
try:
if 'DELETE' in schema_type.resourceMethods:
access_actual.add('d')
except AttributeError:
pass
try:
if 'PUT' in schema_type.resourceMethods:
access_actual.add('u')
except AttributeError:
pass
assert access_actual == set(access)
if props is None:
return 1
for i in ['description', 'annotations', 'labels']:
if i not in props and i in schema_type.resourceFields.keys():
props[i] = 'cru'
for i in ['created', 'removed', 'transitioning', 'transitioningProgress',
'removeTime', 'transitioningMessage', 'id', 'uuid', 'kind',
'state', 'creatorId', 'finalizers', 'ownerReferences', 'type',
'scaledownTime']:
if i not in props and i in schema_type.resourceFields.keys():
props[i] = 'r'
for i in ['name']:
if i not in props and i in schema_type.resourceFields.keys():
props[i] = 'cr'
prop = set(props.keys())
prop_actual = set(schema_type.resourceFields.keys())
if prop_actual != prop:
for k in prop:
assert k in prop_actual
for k in prop_actual:
assert k in prop
assert prop_actual == prop
for name, field in schema_type.resourceFields.items():
assert name in props
prop = set(props[name])
prop_actual = set('r')
prop.add(name)
prop_actual.add(name)
if field.create:
prop_actual.add('c')
if field.update:
prop_actual.add('u')
if 'writeOnly' in field and field.writeOnly:
prop_actual.add('o')
if prop_actual != prop:
assert prop_actual == prop
return 1
def wait_for_template_to_be_created(client, name, timeout=45):
found = False
start = time.time()
interval = 0.5
while not found:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for templates")
templates = client.list_template(catalogId=name)
if len(templates) > 0:
found = True
time.sleep(interval)
interval *= 2
def wait_for_template_to_be_deleted(client, name, timeout=60):
found = False
start = time.time()
interval = 0.5
while not found:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for templates")
templates = client.list_template(catalogId=name)
if len(templates) == 0:
found = True
time.sleep(interval)
interval *= 2
def check_subject_in_rb(rbac, ns, subject_id, name):
rbs = rbac.list_namespaced_role_binding(ns)
for rb in rbs.items:
if rb.metadata.name == name:
            for subject in rb.subjects:
                if subject.name == subject_id:
                    return True
return False
def wait_for_atleast_workload(pclient, nsid, timeout=60, count=0):
start = time.time()
interval = 0.5
workloads = pclient.list_workload(namespaceId=nsid)
while len(workloads.data) < count:
if time.time() - start > timeout:
raise Exception('Timeout waiting for workload service')
time.sleep(interval)
interval *= 2
workloads = pclient.list_workload(namespaceId=nsid)
return workloads
def string_to_encoding(input):
m = hashlib.sha256()
m.update(bytes(input, 'utf-8'))
return base64.b32encode(m.digest())[:10].decode('utf-8')
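
# string_to_encoding yields a short deterministic token: the first 10
# characters of the base32-encoded sha256 digest of the input, useful for
# deriving stable, length-bounded names from longer ids.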
| 4,472 | 25.467456 | 77 | py | rancher | rancher-master/tests/integration/suite/test_cluster_defaults.py |
import json
import pytest
from rancher import ApiError
from .common import random_str
from .conftest import wait_for
@pytest.mark.skip(reason="cluster-defaults disabled")
def test_generic_initial_defaults(admin_mc):
cclient = admin_mc.client
schema_defaults = {}
setting_defaults = {}
data = cclient.schema.types['cluster'].resourceFields
default = data["enableNetworkPolicy"]["default"]
for name in cclient.schema.types['cluster'].resourceFields.keys():
if name == "enableNetworkPolicy":
schema_defaults["enableNetworkPolicy"] = default
for name in cclient.schema.types['rancherKubernetesEngineConfig'] \
.resourceFields.keys():
if name == "ignoreDockerVersion":
schema_defaults["ignoreDockerVersion"] = cclient.schema. \
types["rancherKubernetesEngineConfig"]. \
resourceFields["ignoreDockerVersion"]. \
data_dict()["default"]
setting = cclient.list_setting(name="cluster-defaults")
data = json.loads(setting['data'][0]['default'])
setting_defaults["enableNetworkPolicy"] = data["enableNetworkPolicy"]
setting_defaults["ignoreDockerVersion"] = \
data["rancherKubernetesEngineConfig"]["ignoreDockerVersion"]
assert schema_defaults == setting_defaults
def test_generic_initial_conditions(admin_mc, remove_resource):
cluster = admin_mc.client.create_cluster(
name=random_str(), amazonElasticContainerServiceConfig={
"accessKey": "asdfsd"})
remove_resource(cluster)
assert len(cluster.conditions) == 3
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
assert cluster.conditions[1].type == 'Provisioned'
assert cluster.conditions[1].status == 'Unknown'
assert cluster.conditions[2].type == 'Waiting'
assert cluster.conditions[2].status == 'Unknown'
assert 'exportYaml' not in cluster.actions
def test_eks_cluster_immutable_subnets(admin_mc, remove_resource):
cluster = admin_mc.client.create_cluster(
name=random_str(), amazonElasticContainerServiceConfig={
"accessKey": "asdfsd",
"secretKey": "verySecretKey",
"subnets": [
"subnet-045bfaeca7d3f1cb3",
"subnet-02388a166136f98c4"
]})
remove_resource(cluster)
def cannot_modify_error():
with pytest.raises(ApiError) as e:
# try to edit cluster subnets
admin_mc.client.update_by_id_cluster(
id=cluster.id,
amazonElasticContainerServiceConfig={
"accessKey": "asdfsd",
"secretKey": "verySecretKey",
"subnets": [
"subnet-045bfaeca7d3f1cb3"
]})
if e.value.error.status == 404 or e.value.error.status == 500:
return False
print(e)
assert e.value.error.status == 422
assert e.value.error.message ==\
'cannot modify EKS subnets after creation'
return True
# lister used by cluster validator may not be up to date, may need to retry
wait_for(cannot_modify_error)
# tests updates still work
new = admin_mc.client.update_by_id_cluster(
id=cluster.id,
name=cluster.name,
description="update",
amazonElasticContainerServiceConfig={
# required field when updating KE clusters
"driverName": "amazonelasticcontainerservice",
"accessKey": "asdfsd",
"secretKey": "verySecretKey",
"subnets": [
"subnet-045bfaeca7d3f1cb3",
"subnet-02388a166136f98c4"
]})
assert new.id == cluster.id
assert not hasattr(cluster, "description")
assert hasattr(new, "description")
def test_rke_initial_conditions(admin_mc, remove_resource):
cluster = admin_mc.client.create_cluster(
name=random_str(), rancherKubernetesEngineConfig={
"accessKey": "asdfsd"})
remove_resource(cluster)
assert len(cluster.conditions) == 3
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
assert cluster.conditions[1].type == 'Provisioned'
assert cluster.conditions[1].status == 'Unknown'
assert cluster.conditions[2].type == 'Waiting'
assert cluster.conditions[2].status == 'Unknown'
assert 'exportYaml' in cluster.actions
def test_psp_enabled_set(admin_mc, remove_resource):
"""Asserts podSecurityPolicy field is used to populate pspEnabled in
cluster capabilities"""
admin_client = admin_mc.client
cluster = admin_client.create_cluster(
name=random_str(), rancherKubernetesEngineConfig={
"accessKey": "asdfsd",
"services": {
"kubeApi": {
"podSecurityPolicy": True,
}
}
})
remove_resource(cluster)
def psp_set_to_true():
updated_cluster = admin_client.by_id_cluster(id=cluster.id)
capabilities = updated_cluster.get("capabilities")
if capabilities is not None:
return capabilities.get("pspEnabled") is True
return None
wait_for(lambda: psp_set_to_true(), fail_handler=lambda: "failed waiting "
"for pspEnabled to be set")
def test_import_initial_conditions(admin_mc, remove_resource):
cluster = admin_mc.client.create_cluster(name=random_str())
remove_resource(cluster)
assert not cluster.conditions
def test_rke_k8s_deprecated_versions(admin_mc, remove_resource):
client = admin_mc.client
deprecated_versions_setting = client.by_id_setting(
"k8s-versions-deprecated")
client.update_by_id_setting(id=deprecated_versions_setting.id,
value="{\"v1.8.10-rancher1-1\":true}")
with pytest.raises(ApiError) as e:
cluster = client.create_cluster(
name=random_str(), rancherKubernetesEngineConfig={
"kubernetesVersion": "v1.8.10-rancher1-1"})
remove_resource(cluster)
assert e.value.error.status == 500
assert e.value.error.message == 'Requested kubernetesVersion ' \
'v1.8.10-rancher1-1 is deprecated'
client.update_by_id_setting(id=deprecated_versions_setting.id,
value="")
def test_save_as_template_action_rbac(admin_mc, remove_resource, user_factory):
    cluster = admin_mc.client.create_cluster(
        name=random_str(),
        rancherKubernetesEngineConfig={
            "services": {
                "type": "rkeConfigServices",
                "kubeApi": {
                    "alwaysPullImages": "false",
                    "podSecurityPolicy": "false",
                    "serviceNodePortRange": "30000-32767",
                    "type": "kubeAPIService"
                }
            }
        })
remove_resource(cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
    with pytest.raises(ApiError) as e:
        admin_mc.client.action(obj=cluster, action_name="saveAsTemplate",
                               clusterTemplateName="template1",
                               clusterTemplateRevisionName="v1")
    assert e.value.error.status == 503
user = user_factory()
user_cluster = user.client.create_cluster(name=random_str())
remove_resource(user_cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
    # a regular user has no saveAsTemplate action on the cluster object
    with pytest.raises(AttributeError):
        user.client.action(obj=user_cluster, action_name="saveAsTemplate")
| 8,500 | 37.817352 | 79 | py | rancher | rancher-master/tests/integration/suite/test_app.py |
import time
import pytest
from rancher import ApiError
from .test_catalog import wait_for_template_to_be_created
from .common import random_str
from .conftest import set_server_version, wait_for, wait_for_condition, \
wait_until, user_project_client, DEFAULT_CATALOG
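
# Apps reference charts via an externalId URL of the form used throughout
# these tests:
#   catalog://?catalog=<catalog>&template=<chart>&version=<version>&namespace=<ns>
# Project-scoped catalogs additionally use a "<project>/<catalog>" segment
# plus &type=projectCatalog (see
# test_app_externalid_target_project_verification below).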
def test_app_mysql(admin_pc, admin_mc):
client = admin_pc.client
name = random_str()
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=admin_pc.
project.id)
wait_for_template_to_be_created(admin_mc.client, "library")
answers = {
"defaultImage": "true",
"image": "mysql",
"imageTag": "5.7.14",
"mysqlDatabase": "admin",
"mysqlPassword": "",
"mysqlUser": "admin",
"persistence.enabled": "false",
"persistence.size": "8Gi",
"persistence.storageClass": "",
"service.nodePort": "",
"service.port": "3306",
"service.type": "ClusterIP"
}
client.create_app(
name=name,
externalId="catalog://?catalog=library&template=mysql&version=1.3.1&"
"namespace=cattle-global-data",
targetNamespace=ns.name,
projectId=admin_pc.project.id,
answers=answers
)
wait_for_workload(client, ns.name, count=1)
def test_app_wordpress(admin_pc, admin_mc):
client = admin_pc.client
name = random_str()
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=admin_pc.
project.id)
wait_for_template_to_be_created(admin_mc.client, "library")
answers = {
"defaultImage": "true",
"externalDatabase.database": "",
"externalDatabase.host": "",
"externalDatabase.password": "",
"externalDatabase.port": "3306",
"externalDatabase.user": "",
"image.repository": "bitnami/wordpress",
"image.tag": "5.2.3",
"ingress.enabled": "true",
"ingress.hosts[0].name": "xip.io",
"mariadb.enabled": "true",
"mariadb.image.repository": "bitnami/mariadb",
"mariadb.image.tag": "10.1.32",
"mariadb.mariadbDatabase": "wordpress",
"mariadb.mariadbPassword": "",
"mariadb.mariadbUser": "wordpress",
"mariadb.persistence.enabled": "false",
"mariadb.persistence.size": "8Gi",
"mariadb.persistence.storageClass": "",
"nodePorts.http": "",
"nodePorts.https": "",
"persistence.enabled": "false",
"persistence.size": "10Gi",
"persistence.storageClass": "",
"serviceType": "NodePort",
"wordpressEmail": "[email protected]",
"wordpressPassword": "",
"wordpressUsername": "user"
}
external_id = "catalog://?catalog=library&template=wordpress" \
"&version=7.3.8&namespace=cattle-global-data"
client.create_app(
name=name,
externalId=external_id,
targetNamespace=ns.name,
projectId=admin_pc.project.id,
answers=answers
)
wait_for_workload(client, ns.name, count=2)
@pytest.mark.skip(reason="istio disabled")
def test_app_istio(admin_cc, admin_pc, admin_mc):
client = admin_pc.client
name = "rancher-istio"
url = " https://github.com/guangbochen/system-charts.git"
external_id = "catalog://?catalog=system-library" \
"&template=rancher-istio&version=1.1.5"
ns = admin_pc.cluster.client.create_namespace(name="istio-system",
projectId=admin_pc.
project.id)
admin_mc.client.create_catalog(name="system-library",
branch="istio",
url=url,
)
wait_for_template_to_be_created(admin_mc.client, "system-library")
answers = {
"certmanager.enabled": "false",
"enableCRDs": "true",
"galley.enabled": "true",
"gateways.enabled": "false",
"gateways.istio-ingressgateway.type": "NodePort",
"grafana.enabled": "true",
"grafana.persistence.enabled": "false",
"istio_cni.enabled": "false",
"istiocoredns.enabled": "false",
"kiali.enabled": "true",
"mixer.enabled": "true",
"mixer.policy.enabled": "false",
"mixer.telemetry.resources.limits.cpu": "4800m",
"mixer.telemetry.resources.limits.memory": "4048Mi",
"mixer.telemetry.resources.requests.cpu": "1000m",
"mixer.telemetry.resources.requests.memory": "1024Mi",
"mtls.enabled": "false",
"nodeagent.enabled": "false",
"pilot.enabled": "true",
"pilot.resources.limits.cpu": "1000m",
"pilot.resources.limits.memory": "4096Mi",
"pilot.resources.requests.cpu": "500m",
"pilot.resources.requests.memory": "2048Mi",
"pilot.traceSampling": "1",
"prometheus.enabled": "true",
"prometheus.resources.limits.cpu": "1000m",
"prometheus.resources.limits.memory": "1000Mi",
"prometheus.resources.requests.cpu": "750m",
"prometheus.resources.requests.memory": "750Mi",
"prometheus.retention": "6h",
"security.enabled": "true",
"sidecarInjectorWebhook.enabled": "true",
"tracing.enabled": "true"
}
client.create_app(
name=name,
externalId=external_id,
targetNamespace=ns.name,
projectId=admin_pc.project.id,
answers=answers
)
wait_for_monitor_metric(admin_cc, admin_mc)
def test_prehook_chart(admin_pc, admin_mc):
client = admin_pc.client
name = random_str()
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=admin_pc.
project.id)
url = "https://github.com/StrongMonkey/charts-1.git"
catalog = admin_mc.client.create_catalog(name=random_str(),
branch="test",
url=url,
)
wait_for_template_to_be_created(admin_mc.client, catalog.name)
external_id = "catalog://?catalog=" + \
catalog.name + "&template=busybox&version=0.0.2" \
"&namespace=cattle-global-data"
client.create_app(
name=name,
externalId=external_id,
targetNamespace=ns.name,
projectId=admin_pc.project.id,
)
    # only one workload (the job) will exist, because the deployment has to
    # wait for the job to finish, and the job never will since we can't
    # create a real container in this environment
wait_for_workload(client, ns.name, count=1)
jobs = client.list_job(namespaceId=ns.id)
assert len(jobs) == 1
def test_app_namespace_annotation(admin_pc, admin_mc):
client = admin_pc.client
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=admin_pc.
project.id)
wait_for_template_to_be_created(admin_mc.client, "library")
app1 = client.create_app(
name=random_str(),
externalId="catalog://?catalog=library&template=mysql&version=1.3.1"
"&namespace=cattle-global-data",
targetNamespace=ns.name,
projectId=admin_pc.project.id,
)
wait_for_workload(client, ns.name, count=1)
external_id = "catalog://?catalog=library&template=wordpress" \
"&version=7.3.8&namespace=cattle-global-data"
app2 = client.create_app(
name=random_str(),
externalId=external_id,
targetNamespace=ns.name,
projectId=admin_pc.project.id,
)
wait_for_workload(client, ns.name, count=3)
ns = admin_pc.cluster.client.reload(ns)
ns = wait_for_app_annotation(admin_pc, ns, app1.name)
ns = wait_for_app_annotation(admin_pc, ns, app2.name)
client.delete(app1)
wait_for_app_to_be_deleted(client, app1)
ns = wait_for_app_annotation(admin_pc, ns, app1.name, exists=False)
assert app1.name not in ns.annotations['cattle.io/appIds']
assert app2.name in ns.annotations['cattle.io/appIds']
client.delete(app2)
wait_for_app_to_be_deleted(client, app2)
ns = wait_for_app_annotation(admin_pc, ns, app2.name, exists=False)
assert app2.name not in ns.annotations.get('cattle.io/appIds', [])
def test_helm_timeout(admin_pc, admin_mc, remove_resource):
"""Test helm timeout flag. This test asserts timeout flag is properly being
passed to helm.
"""
client = admin_pc.client
ns = admin_pc.cluster.client.create_namespace(name="ns-" + random_str(),
projectId=admin_pc.
project.id)
remove_resource(ns)
wait_for_template_to_be_created(admin_mc.client, "library")
# timeout of one second is not sufficient for installing mysql and should
# result in failure
app1 = client.create_app(
name="app-" + random_str(),
externalId="catalog://?catalog=library&template=mysql&version=1.3.1&"
"namespace=cattle-global-data",
targetNamespace=ns.name,
projectId=admin_pc.project.id,
wait=True,
timeout=1,
)
remove_resource(app1)
assert app1.timeout == 1
assert app1.wait
wait_for_workload(client, ns.name, count=1)
def wait_for_transition_error(app):
def transition_error():
test_app = client.reload(app)
if test_app.transitioning != "error":
return False
return test_app
return wait_for(transition_error, timeout=15, fail_handler=lambda:
"expected transitioning to fail")
app1 = wait_for_transition_error(app1)
assert "timed out waiting for the condition" in app1.transitioningMessage
def wait_for_app_annotation(admin_pc, ns, app_name, exists=True, timeout=60):
start = time.time()
interval = 0.5
ns = admin_pc.cluster.client.reload(ns)
while (app_name in ns.annotations.get('cattle.io/appIds', [])) != exists:
if time.time() - start > timeout:
print(ns.annotations)
raise Exception('Timeout waiting for app annotation')
time.sleep(interval)
interval *= 2
ns = admin_pc.cluster.client.reload(ns)
return ns
def test_app_custom_values_file(admin_pc, admin_mc):
client = admin_pc.client
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=admin_pc.
project.id)
wait_for_template_to_be_created(admin_mc.client, "library")
values_yaml = "replicaCount: 2\r\nimage:\r\n " \
"repository: registry\r\n tag: 2.7"
answers = {
"image.tag": "2.6"
}
app = client.create_app(
name=random_str(),
externalId="catalog://?catalog=library&template=docker-registry"
"&version=1.8.1&namespace=cattle-global-data",
targetNamespace=ns.name,
projectId=admin_pc.project.id,
valuesYaml=values_yaml,
answers=answers
)
workloads = wait_for_workload(client, ns.name, count=1)
workloads = wait_for_replicas(client, ns.name, count=2)
print(workloads)
assert workloads.data[0].deploymentStatus.unavailableReplicas == 2
assert workloads.data[0].containers[0].image == "registry:2.6"
client.delete(app)
wait_for_app_to_be_deleted(client, app)
@pytest.mark.nonparallel
def test_app_create_validation(admin_mc, admin_pc, custom_catalog,
remove_resource, restore_rancher_version):
"""Test create validation for apps. This test will set the rancher version
explicitly and attempt to create apps with rancher version requirements.
"""
# 2.3.1 uses 2.4.1-2.6.0
# 2.7.0 uses 2.5.0-2.7.0
client = admin_mc.client
c_name = random_str()
custom_catalog(name=c_name)
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=admin_pc.
project.id)
remove_resource(ns)
cat_base = "catalog://?catalog="+c_name+"&template=chartmuseum&version="
app_data = {
'name': random_str(),
'externalId': cat_base+"2.7.0",
'targetNamespace': ns.name,
'projectId': admin_pc.project.id,
"answers": [{
"type": "answer",
"clusterId": None,
"projectId": None,
"values": {
"defaultImage": "true",
"image.repository": "chartmuseum/chartmuseum",
"image.tag": "v0.11.0",
"env.open.STORAGE": "local",
"gcp.secret.enabled": "false",
"gcp.secret.key": "credentials.json",
"persistence.enabled": "true",
"persistence.size": "10Gi",
"ingress.enabled": "true",
"ingress.hosts[0]": "xip.io",
"service.type": "NodePort",
"env.open.SHOW_ADVANCED": "false",
"env.open.DEPTH": "0",
"env.open.ALLOW_OVERWRITE": "false",
"env.open.AUTH_ANONYMOUS_GET": "false",
"env.open.DISABLE_METRICS": "true"
}
}]
}
set_server_version(client, "2.4.2-beta2")
# First try requires a min of 2.5.0 so an error should be returned
with pytest.raises(ApiError) as e:
app1 = admin_pc.client.create_app(app_data)
remove_resource(app1)
assert e.value.error.status == 422
assert e.value.error.message == 'rancher min version not met'
set_server_version(client, "2.7.1")
# Second try requires a max of 2.7.0 so an error should be returned
with pytest.raises(ApiError) as e:
app1 = admin_pc.client.create_app(app_data)
remove_resource(app1)
assert e.value.error.status == 422
assert e.value.error.message == 'rancher max version exceeded'
set_server_version(client, "2.5.1-rc4")
# Third try should work
app1 = admin_pc.client.create_app(app_data)
remove_resource(app1)
wait_for_workload(admin_pc.client, ns.name, count=1)
@pytest.mark.nonparallel
def test_app_update_validation(admin_mc, admin_pc, custom_catalog,
remove_resource, restore_rancher_version):
"""Test update validation for apps. This test will set the rancher version
explicitly and attempt to update apps with rancher version requirements.
"""
# 2.3.1 uses 2.4.1-2.6.0
# 2.7.0 uses 2.5.0-2.7.0
client = admin_mc.client
c_name = random_str()
custom_catalog(name=c_name)
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=admin_pc.
project.id)
remove_resource(ns)
cat_base = "catalog://?catalog="+c_name+"&template=chartmuseum&version="
app_data = {
'name': random_str(),
'externalId': cat_base+"2.3.1",
'targetNamespace': ns.name,
'projectId': admin_pc.project.id,
"answers": [{
"type": "answer",
"clusterId": None,
"projectId": None,
"values": {
"defaultImage": "true",
"image.repository": "chartmuseum/chartmuseum",
"image.tag": "v0.9.0",
"env.open.STORAGE": "local",
"gcp.secret.enabled": "false",
"gcp.secret.key": "credentials.json",
"persistence.enabled": "true",
"persistence.size": "10Gi",
"ingress.enabled": "true",
"ingress.hosts[0]": "xip.io",
"service.type": "NodePort",
"env.open.SHOW_ADVANCED": "false",
"env.open.DEPTH": "0",
"env.open.ALLOW_OVERWRITE": "false",
"env.open.AUTH_ANONYMOUS_GET": "false",
"env.open.DISABLE_METRICS": "true"
}
}]
}
set_server_version(client, "2.4.2-rc3")
# Launch the app version 2.3.1 with rancher 2.4.2-rc3
app1 = admin_pc.client.create_app(app_data)
remove_resource(app1)
wait_for_workload(admin_pc.client, ns.name, count=1)
upgrade_dict = {
'obj': app1,
'action_name': 'upgrade',
'answers': app_data['answers'],
'externalId': cat_base+"2.7.0",
'forceUpgrade': False,
}
# Attempt to upgrade, app version 2.7.0 requires a min of 2.5.0 so this
# will error
with pytest.raises(ApiError) as e:
app1 = client.action(**upgrade_dict)
assert e.value.error.status == 422
assert e.value.error.message == 'rancher min version not met'
set_server_version(client, "2.7.1")
    # Second try requires a max of 2.7.0 so an error should be returned
with pytest.raises(ApiError) as e:
app1 = client.action(**upgrade_dict)
assert e.value.error.status == 422
assert e.value.error.message == 'rancher max version exceeded'
@pytest.mark.nonparallel
def test_app_rollback_validation(admin_mc, admin_pc, custom_catalog,
remove_resource, restore_rancher_version):
"""Test rollback validation for apps. This test will set the rancher version
explicitly and attempt to rollback apps with rancher version requirements.
"""
# 2.3.1 uses 2.4.1-2.6.0
# 2.7.0 uses 2.5.0-2.7.0
client = admin_mc.client
c_name = random_str()
custom_catalog(name=c_name)
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=admin_pc.
project.id)
remove_resource(ns)
cat_base = "catalog://?catalog="+c_name+"&template=chartmuseum&version="
app_data = {
'name': random_str(),
'externalId': cat_base+"2.3.1",
'targetNamespace': ns.name,
'projectId': admin_pc.project.id,
"answers": [{
"type": "answer",
"clusterId": None,
"projectId": None,
"values": {
"defaultImage": "true",
"image.repository": "chartmuseum/chartmuseum",
"image.tag": "v0.9.0",
"env.open.STORAGE": "local",
"gcp.secret.enabled": "false",
"gcp.secret.key": "credentials.json",
"persistence.enabled": "true",
"persistence.size": "10Gi",
"ingress.enabled": "true",
"ingress.hosts[0]": "xip.io",
"service.type": "NodePort",
"env.open.SHOW_ADVANCED": "false",
"env.open.DEPTH": "0",
"env.open.ALLOW_OVERWRITE": "false",
"env.open.AUTH_ANONYMOUS_GET": "false",
"env.open.DISABLE_METRICS": "true"
}
}]
}
set_server_version(client, "2.5.0")
    # Launch the app version 2.3.1 with rancher 2.5.0
app1 = admin_pc.client.create_app(app_data)
remove_resource(app1)
wait_for_workload(admin_pc.client, ns.name, count=1)
def _app_revision():
app = admin_pc.client.reload(app1)
return app.appRevisionId is not None
wait_for(_app_revision, fail_handler=lambda: 'app has no revision')
app1 = admin_pc.client.reload(app1)
assert app1.appRevisionId is not None, 'app has no revision'
original_rev = app1.appRevisionId
upgrade_dict = {
'obj': app1,
'action_name': 'upgrade',
'answers': app_data['answers'],
'externalId': cat_base+"2.7.0",
'forceUpgrade': False,
}
# Upgrade the app to get a rollback revision
client.action(**upgrade_dict)
def _app_revisions():
app = admin_pc.client.reload(app1)
if len(app.revision().data) > 1:
return app.appRevisionId != original_rev
return False
def _app_fail():
app = admin_pc.client.reload(app1)
return 'app did not upgrade: {}'.format(app)
wait_for(_app_revisions,
fail_handler=_app_fail,
timeout=90)
app1 = admin_pc.client.reload(app1)
assert app1.appRevisionId != original_rev, 'app did not upgrade'
rollback_dict = {
'obj': app1,
'action_name': 'rollback',
'revisionId': original_rev,
'forceUpgrade': False,
}
set_server_version(client, "2.6.1")
# Rollback requires a max of 2.6.0 so an error should be returned
with pytest.raises(ApiError) as e:
client.action(**rollback_dict)
assert e.value.error.status == 422
assert e.value.error.message == 'rancher max version exceeded'
set_server_version(client, "2.0.0-rc3")
# Second try requires a min of 2.4.1 so an error should be returned
with pytest.raises(ApiError) as e:
client.action(**rollback_dict)
msg = e.value.error
assert e.value.error.message == 'rancher min version not met', msg
assert e.value.error.status == 422
def test_app_has_helmversion(admin_pc, admin_mc, remove_resource):
"""Test that app is using specified helm version"""
app_client = admin_pc.client
catalog_client = admin_mc.client
catalog_name1 = random_str()
catalog_name2 = random_str()
app_name1 = random_str()
app_name2 = random_str()
catalog1 = catalog_client.create_catalog(name=catalog_name1,
branch="master",
url=DEFAULT_CATALOG,
)
remove_resource(catalog1)
catalog2 = catalog_client.create_catalog(name=catalog_name2,
branch="master",
url=DEFAULT_CATALOG,
helmVersion="helm_v3"
)
remove_resource(catalog2)
wait_for_template_to_be_created(catalog_client, catalog_name1)
wait_for_template_to_be_created(catalog_client, catalog_name2)
ns1 = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=admin_pc.
project.id)
remove_resource(ns1)
ns2 = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=admin_pc.
project.id)
remove_resource(ns2)
app1 = app_client.create_app(
name=app_name1,
externalId="catalog://?catalog="+catalog_name1+"&template=chartmuseum&"
"version=2.7.0&namespace=cattle-global-data",
targetNamespace=ns1.name,
projectId=admin_pc.project.id,
)
remove_resource(app1)
app2 = app_client.create_app(
name=app_name2,
externalId="catalog://?catalog="+catalog_name2+"&template=chartmuseum&"
"version=2.7.0&namespace=cattle-global-data",
targetNamespace=ns2.name,
projectId=admin_pc.project.id,
)
remove_resource(app2)
wait_for_workload(admin_pc.client, ns1.name, count=1)
wait_for_workload(admin_pc.client, ns2.name, count=1)
app1 = admin_pc.client.reload(app1)
app2 = admin_pc.client.reload(app2)
assert "helmVersion" not in app1
assert "helmVersion" in app2
assert app2.helmVersion == "helm_v3"
def test_app_upgrade_has_helmversion(admin_pc, admin_mc, remove_resource):
"""Test helm version exists on new chart versions when added to an
existing catalog and that the helm version carries through template,
templateVersion and app on upgrade"""
app_client = admin_pc.client
catalog_client = admin_mc.client
catalog_name = random_str()
app1_name = random_str()
app2_name = random_str()
helm_3 = 'helm_v3'
cat_base = "catalog://?catalog=" + catalog_name + \
"&template=rancher-v3-issue&version="
helm3_catalog = catalog_client.create_catalog(
name=catalog_name,
branch="helmversion-onupdate-1v",
url=DEFAULT_CATALOG,
helmVersion=helm_3
)
remove_resource(helm3_catalog)
wait_for_template_to_be_created(catalog_client, catalog_name)
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=admin_pc.
project.id)
remove_resource(ns)
# check helm version at template level
templates = catalog_client.list_template(catalogId=helm3_catalog.id).data
assert templates[1].status.helmVersion == helm_3
# check helm version at templateVersion level
templateVersion = catalog_client.list_templateVersion(
name=catalog_name+"-rancher-v3-issue-0.1.0")
assert templateVersion.data[0].status.helmVersion == helm_3
# creating app with existing chart version in catalog
app1 = app_client.create_app(
name=app1_name,
externalId=cat_base+"0.1.0&namespace="+ns.name,
targetNamespace=ns.name,
projectId=admin_pc.project.id,
)
remove_resource(app1)
wait_for_workload(app_client, ns.name, count=1)
app1 = app_client.reload(app1)
# check that the correct helm version is on the app
assert "helmVersion" in app1
assert app1.helmVersion == helm_3
# changing branch on catalog to simulate adding a new chart version to the
# catalog
catalog_data = {
'name': catalog_name,
'branch': "helmversion-onupdate-2v",
'url': DEFAULT_CATALOG,
'helmVersion': helm_3
}
helm3_catalog = catalog_client.update(helm3_catalog, catalog_data)
def ensure_updated_catalog(catalog):
catalog = catalog_client.reload(catalog)
templates = catalog_client.list_template(catalogId=catalog.id).data
templatesString = ','.join([str(i) for i in templates])
if "0.1.1" in templatesString:
return catalog
return None
helm3_catalog = wait_for(
lambda: ensure_updated_catalog(helm3_catalog),
fail_handler=lambda:
"Timed out waiting for catalog to stop transitioning")
templates = catalog_client.list_template(catalogId=helm3_catalog.id).data
assert templates[1].status.helmVersion == helm_3
templateVersion = catalog_client.list_templateVersion(
name=catalog_name+"-rancher-v3-issue-0.1.1")
assert templateVersion.data[0].status.helmVersion == helm_3
project_client = user_project_client(admin_pc, admin_pc.project)
# update existing app with new version to ensure correct
# helm version is listed
app_data = {
'name': app1_name,
'externalId': cat_base+"0.1.1",
'targetNamespace': ns.name,
'projectId': admin_pc.project.id,
}
project_client.update(app1, app_data)
app1 = project_client.reload(app1)
assert "helmVersion" in app1
assert app1.helmVersion == helm_3
# create a new app with new version to ensure helm version is listed
app2 = app_client.create_app(
name=app2_name,
externalId=cat_base+"0.1.1&namespace="+ns.name,
targetNamespace=ns.name,
projectId=admin_pc.project.id,
)
remove_resource(app2)
wait_for_workload(admin_pc.client, ns.name, count=2)
app2 = app_client.reload(app2)
# check that the correct helm version is on the app
assert "helmVersion" in app2
assert app2.helmVersion == helm_3
def test_app_externalid_target_project_verification(admin_mc,
admin_pc,
user_factory,
remove_resource):
client = admin_mc.client
p1 = client.create_project(name=random_str(), clusterId='local')
remove_resource(p1)
wait_for_condition('InitialRolesPopulated', 'True', client, p1)
p1 = client.reload(p1)
# create a project scoped catalog in p1
project_name = str.lstrip(p1.id, "local:")
name = random_str()
url = "https://github.com/rancher/integration-test-charts.git"
client.create_project_catalog(name=name,
branch="master",
url=url,
projectId=p1.id,
)
wait_until(lambda: len(client.list_template(projectCatalogId=name)) > 0)
external_id = "catalog://?catalog=" + project_name + "/" + name + \
"&type=projectCatalog&template=chartmuseum" \
"&version=2.7.0"
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=admin_pc.
project.id)
remove_resource(ns)
app_data = {
'name': random_str(),
'externalId': external_id,
'targetNamespace': ns.name,
'projectId': admin_pc.project.id,
}
    # using this catalog to create an app in another project should fail
    with pytest.raises(ApiError) as e:
        admin_pc.client.create_app(app_data)
    assert e.value.error.status == 422
    assert "Cannot use catalog from" in e.value.error.message
# create app in the p1 project, this should work
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=p1.id)
remove_resource(ns)
app_name = random_str()
app_data = {
'name': app_name,
'externalId': external_id,
'targetNamespace': ns.name,
'projectId': p1.id,
"answers": [{
"values": {
"defaultImage": "true",
"image.repository": "chartmuseum/chartmuseum",
"image.tag": "v0.7.1",
"env.open.STORAGE": "local",
"gcp.secret.enabled": "false",
"gcp.secret.key": "credentials.json",
"persistence.enabled": "true",
"persistence.size": "10Gi",
"ingress.enabled": "true",
"ingress.hosts[0]": "xip.io",
"service.type": "NodePort",
"env.open.SHOW_ADVANCED": "false",
"env.open.DEPTH": "0",
"env.open.ALLOW_OVERWRITE": "false",
"env.open.AUTH_ANONYMOUS_GET": "false",
"env.open.DISABLE_METRICS": "true"
}
}]
}
p1_client = user_project_client(admin_pc, p1)
app1 = p1_client.create_app(app_data)
remove_resource(app1)
wait_for_workload(p1_client, ns.name, count=1)
app = p1_client.reload(app1)
# updating app without passing projectId should not throw any error
update_data = {
'name': app_name,
'externalId': external_id,
'targetNamespace': ns.name,
'type': app,
"answers": [{
"values": {
"defaultImage": "true",
"image.repository": "chartmuseum/chartmuseum",
"image.tag": "v0.7.1",
"env.open.STORAGE": "local",
"gcp.secret.enabled": "false",
"gcp.secret.key": "credentials.json",
"persistence.enabled": "true",
"persistence.size": "10Gi",
"ingress.enabled": "true",
"ingress.hosts[0]": "xip.io",
"service.type": "NodePort",
"env.open.SHOW_ADVANCED": "false",
"env.open.DEPTH": "1",
"env.open.ALLOW_OVERWRITE": "false",
"env.open.AUTH_ANONYMOUS_GET": "false",
"env.open.DISABLE_METRICS": "true"
}
}]
}
p1_client.update(app, update_data)
def test_local_app_can_deploy(admin_pc, admin_mc, remove_resource):
"""Test that an app without an externalId can be deployed
successfully to simulate a local app deployed through cli"""
app_client = admin_pc.client
app_name = random_str()
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=admin_pc.
project.id)
remove_resource(ns)
# create app without the externalId value set
app = app_client.create_app(
name=app_name,
targetNamespace=ns.name,
projectId=admin_pc.project.id,
)
remove_resource(app)
wait_for(lambda: app_client.by_id_app(app.id) is not None,
fail_handler=lambda:
"app could not be found")
def wait_for_workload(client, ns, timeout=60, count=0):
start = time.time()
interval = 0.5
workloads = client.list_workload(namespaceId=ns)
while len(workloads.data) < count:
if time.time() - start > timeout:
print(workloads)
raise Exception('Timeout waiting for workload service')
time.sleep(interval)
interval *= 2
workloads = client.list_workload(namespaceId=ns)
return workloads
def wait_for_replicas(client, ns, timeout=60, count=0):
start = time.time()
interval = 0.5
workloads = client.list_workload(namespaceId=ns)
while workloads.data[0].deploymentStatus.replicas != count:
if time.time() - start > timeout:
print(workloads)
raise Exception('Timeout waiting for workload replicas')
time.sleep(interval)
interval *= 2
workloads = client.list_workload(namespaceId=ns)
return workloads
def wait_for_app_to_be_deleted(client, app, timeout=120):
start = time.time()
interval = 0.5
while True:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for apps to be deleted")
apps = client.list_app()
found = False
for a in apps:
if a.id == app.id:
found = True
break
if not found:
break
time.sleep(interval)
interval *= 2
def wait_for_monitor_metric(admin_cc, admin_mc, timeout=60):
client = admin_mc.client
start = time.time()
interval = 0.5
monitorMetrics = client.list_monitor_metric(namespaceId=admin_cc.
cluster.id)
while len(monitorMetrics.data) == 0:
if time.time() - start > timeout:
print(monitorMetrics)
raise Exception('Timeout waiting for monitorMetrics service')
time.sleep(interval)
interval *= 2
monitorMetrics = client.list_monitor_metric(namespaceId=admin_cc.
cluster.id)
found = False
for m in monitorMetrics:
if m.labels.component == "istio":
found = True
break
if not found:
raise AssertionError(
"not found istio expression")
| 35,472 | 36.144503 | 80 | py |