repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class)
---|---|---|---|---|---|---
pySDC | pySDC-master/pySDC/implementations/hooks/log_extrapolated_error_estimate.py | from pySDC.core.Hooks import hooks
class LogExtrapolationErrorEstimate(hooks):
"""
Store the extrapolated error estimate at the end of each step as "error_extrapolation_estimate".
"""
def post_step(self, step, level_number):
"""
Record extrapolated error estimate
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
Returns:
None
"""
super().post_step(step, level_number)
# some abbreviations
L = step.levels[level_number]
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='error_extrapolation_estimate',
value=L.status.get('error_extrapolation_estimate'),
)
| 908 | 25.735294 | 100 | py |
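Each post_step call above appends one record to the controller statistics, keyed by process, time, level, iteration, sweep and type. As a minimal stand-alone sketch (not the pySDC stats API, just a plain list of dicts with assumed values), the logged error estimates can be recovered by filtering on the type field and sorting by time:

# Minimal stand-alone sketch (not the pySDC stats API): assumed records mimicking
# what repeated add_to_stats calls would collect over a run.
records = [
    {'time': 0.1, 'type': 'error_extrapolation_estimate', 'value': 1.2e-5},
    {'time': 0.2, 'type': 'error_extrapolation_estimate', 'value': 9.8e-6},
    {'time': 0.2, 'type': 'residual_post_step', 'value': 3.1e-9},
]

# filter by type and sort by time to get the estimate trajectory
estimates = sorted(
    ((r['time'], r['value']) for r in records if r['type'] == 'error_extrapolation_estimate'),
    key=lambda pair: pair[0],
)
print(estimates)  # [(0.1, 1.2e-05), (0.2, 9.8e-06)]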
pySDC | pySDC-master/pySDC/implementations/transfer_classes/BaseTransfer_mass.py | from pySDC.core.BaseTransfer import base_transfer
from pySDC.core.Errors import UnlockError
class base_transfer_mass(base_transfer):
"""
base_transfer class for problems with a mass matrix (tau corrections and the finest-level initial value are built via apply_mass_matrix)
Attributes:
logger: custom logger for sweeper-related logging
params(__Pars): parameter object containing the custom parameters passed by the user
fine (pySDC.Level.level): reference to the fine level
coarse (pySDC.Level.level): reference to the coarse level
"""
def restrict(self):
"""
Space-time restriction routine
The routine applies the spatial restriction operator to the fine values on the fine nodes, then reevaluates f
on the coarse level. This is used for the first part of the FAS correction tau via integration. The second part
is the integral over the fine values, restricted to the coarse level. Finally, possible tau corrections on the
fine level are restricted as well.
"""
# get data for easier access
F = self.fine
G = self.coarse
PG = G.prob
PF = F.prob
SF = F.sweep
SG = G.sweep
# only if the level is unlocked at least by prediction
if not F.status.unlocked:
raise UnlockError('fine level is still locked, cannot use data from there')
# restrict fine values in space
tmp_u = []
for m in range(1, SF.coll.num_nodes + 1):
tmp_u.append(self.space_transfer.restrict(F.u[m]))
# restrict collocation values
G.u[0] = self.space_transfer.restrict(F.u[0])
for n in range(1, SG.coll.num_nodes + 1):
G.u[n] = self.Rcoll[n - 1, 0] * tmp_u[0]
for m in range(1, SF.coll.num_nodes):
G.u[n] += self.Rcoll[n - 1, m] * tmp_u[m]
# re-evaluate f on coarse level
G.f[0] = PG.eval_f(G.u[0], G.time)
for m in range(1, SG.coll.num_nodes + 1):
G.f[m] = PG.eval_f(G.u[m], G.time + G.dt * SG.coll.nodes[m - 1])
# build coarse level tau correction part
tauG = G.sweep.integrate()
for m in range(SG.coll.num_nodes):
tauG[m] = PG.apply_mass_matrix(G.u[m + 1]) - tauG[m]
# build fine level tau correction part
tauF = F.sweep.integrate()
for m in range(SF.coll.num_nodes):
tauF[m] = PF.apply_mass_matrix(F.u[m + 1]) - tauF[m]
# restrict fine level tau correction part in space
tmp_tau = []
for m in range(SF.coll.num_nodes):
tmp_tau.append(self.space_transfer.restrict(tauF[m]))
# restrict fine level tau correction part in collocation
tauFG = []
for n in range(1, SG.coll.num_nodes + 1):
tauFG.append(self.Rcoll[n - 1, 0] * tmp_tau[0])
for m in range(1, SF.coll.num_nodes):
tauFG[-1] += self.Rcoll[n - 1, m] * tmp_tau[m]
# build tau correction
for m in range(SG.coll.num_nodes):
G.tau[m] = tauG[m] - tauFG[m]
if F.tau[0] is not None:
# restrict possible tau correction from fine in space
tmp_tau = []
for m in range(SF.coll.num_nodes):
tmp_tau.append(self.space_transfer.restrict(F.tau[m]))
# restrict possible tau correction from fine in collocation
for n in range(SG.coll.num_nodes):
for m in range(SF.coll.num_nodes):
G.tau[n] += self.Rcoll[n, m] * tmp_tau[m]
else:
pass
# save u and rhs evaluations for interpolation
for m in range(1, SG.coll.num_nodes + 1):
G.uold[m] = PG.dtype_u(G.u[m])
G.fold[m] = PG.dtype_f(G.f[m])
# This is somewhat ugly, but we have to apply the mass matrix on u0 only on the finest level
if F.level_index == 0:
G.u[0] = self.space_transfer.restrict(PF.apply_mass_matrix(F.u[0]))
# works as a predictor
G.status.unlocked = True
return None
def prolong(self):
"""
Space-time prolongation routine
This routine applies the spatial prolongation routine to the difference between the computed and the restricted
values on the coarse level and then adds this difference to the fine values as coarse correction.
"""
# get data for easier access
F = self.fine
G = self.coarse
PF = F.prob
SF = F.sweep
SG = G.sweep
# only if the level is unlocked at least by prediction or restriction
if not G.status.unlocked:
raise UnlockError('coarse level is still locked, cannot use data from there')
# build coarse correction
# interpolate values in space first
tmp_u = []
for m in range(1, SG.coll.num_nodes + 1):
tmp_u.append(self.space_transfer.prolong(G.u[m] - G.uold[m]))
# interpolate values in collocation
# F.u[0] += tmp_u[0]
for n in range(1, SF.coll.num_nodes + 1):
for m in range(SG.coll.num_nodes):
F.u[n] += self.Pcoll[n - 1, m] * tmp_u[m]
# re-evaluate f on fine level
# F.f[0] = PF.eval_f(F.u[0], F.time)
for m in range(1, SF.coll.num_nodes + 1):
F.f[m] = PF.eval_f(F.u[m], F.time + F.dt * SF.coll.nodes[m - 1])
return None
def prolong_f(self):
"""
Space-time prolongation routine w.r.t. the rhs f
This routine applies the spatial prolongation routine to the difference between the computed and the restricted
values on the coarse level and then adds this difference to the fine values as coarse correction.
"""
# get data for easier access
F = self.fine
G = self.coarse
SF = F.sweep
SG = G.sweep
# only if the level is unlocked at least by prediction or restriction
if not G.status.unlocked:
raise UnlockError('coarse level is still locked, cannot use data from there')
# build coarse correction
# interpolate values in space first
tmp_u = []
tmp_f = []
for m in range(1, SG.coll.num_nodes + 1):
tmp_u.append(self.space_transfer.prolong(G.u[m] - G.uold[m]))
tmp_f.append(self.space_transfer.prolong(G.f[m] - G.fold[m]))
# interpolate values in collocation
for n in range(1, SF.coll.num_nodes + 1):
for m in range(SG.coll.num_nodes):
F.u[n] += self.Pcoll[n - 1, m] * tmp_u[m]
F.f[n] += self.Pcoll[n - 1, m] * tmp_f[m]
return None
| 6,619 | 34.212766 | 119 | py |
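The collocation restriction in restrict() above forms each coarse-node value as an Rcoll-weighted sum of the spatially restricted fine-node values. A minimal numpy sketch of that weighted combination, with an assumed 2x3 Rcoll matrix and the spatial restriction taken as the identity:

import numpy as np

# assumed collocation transfer matrix: 2 coarse nodes x 3 fine nodes
Rcoll = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.2, 0.7]])
# fine-node values after spatial restriction (identity here for simplicity)
tmp_u = [np.array([1.0, 2.0]),
         np.array([2.0, 3.0]),
         np.array([4.0, 5.0])]

# same loop structure as in restrict(): each coarse node is a weighted sum of all fine nodes
u_coarse = []
for n in range(Rcoll.shape[0]):
    acc = Rcoll[n, 0] * tmp_u[0]
    for m in range(1, Rcoll.shape[1]):
        acc = acc + Rcoll[n, m] * tmp_u[m]
    u_coarse.append(acc)

print(u_coarse)  # two coarse-node values, each a length-2 array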
pySDC | pySDC-master/pySDC/implementations/transfer_classes/TransferPETScDMDA.py | from pySDC.core.Errors import TransferError
from pySDC.core.SpaceTransfer import space_transfer
from pySDC.implementations.datatype_classes.petsc_vec import petsc_vec, petsc_vec_imex, petsc_vec_comp2
class mesh_to_mesh_petsc_dmda(space_transfer):
"""
This implementation can restrict and prolong between PETSc DMDA grids
"""
def __init__(self, fine_prob, coarse_prob, params):
"""
Initialization routine
Args:
fine_prob: fine problem
coarse_prob: coarse problem
params: parameters for the transfer operators
"""
# invoke super initialization
super(mesh_to_mesh_petsc_dmda, self).__init__(fine_prob, coarse_prob, params)
# set interpolation type (no effect as far as I can tell)
# self.coarse_prob.init.setInterpolationType(PETSc.DMDA.InterpolationType.Q1)
# define interpolation (only accurate for constant functions)
self.interp, _ = self.coarse_prob.init.createInterpolation(self.fine_prob.init)
# define restriction as injection (transpose of interpolation does not work)
self.inject = self.coarse_prob.init.createInjection(self.fine_prob.init)
def restrict(self, F):
"""
Restriction implementation
Args:
F: the fine level data
"""
if isinstance(F, petsc_vec):
u_coarse = self.coarse_prob.dtype_u(self.coarse_prob.init)
self.inject.mult(F, u_coarse)
elif isinstance(F, petsc_vec_imex):
u_coarse = self.coarse_prob.dtype_f(self.coarse_prob.init)
self.inject.mult(F.impl, u_coarse.impl)
self.inject.mult(F.expl, u_coarse.expl)
elif isinstance(F, petsc_vec_comp2):
u_coarse = self.coarse_prob.dtype_f(self.coarse_prob.init)
self.inject.mult(F.comp1, u_coarse.comp1)
self.inject.mult(F.comp2, u_coarse.comp2)
else:
raise TransferError('Unknown type of fine data, got %s' % type(F))
return u_coarse
def prolong(self, G):
"""
Prolongation implementation
Args:
G: the coarse level data
"""
if isinstance(G, petsc_vec):
u_fine = self.fine_prob.dtype_u(self.fine_prob.init)
self.interp.mult(G, u_fine)
elif isinstance(G, petsc_vec_imex):
u_fine = self.fine_prob.dtype_f(self.fine_prob.init)
self.interp.mult(G.impl, u_fine.impl)
self.interp.mult(G.expl, u_fine.expl)
elif isinstance(G, petsc_vec_comp2):
u_fine = self.fine_prob.dtype_f(self.fine_prob.init)
self.interp.mult(G.comp1, u_fine.comp1)
self.interp.mult(G.comp2, u_fine.comp2)
else:
raise TransferError('Unknown type of coarse data, got %s' % type(G))
return u_fine
| 2,871 | 36.789474 | 103 | py |
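A stand-alone sketch of the same operator construction with petsc4py, mirroring the calls used in mesh_to_mesh_petsc_dmda above (the grid size is an arbitrary assumption; the fine DMDA is obtained by refining the coarse one so that interpolation and injection are compatible):

from petsc4py import PETSc

# assumed coarse grid; refining it guarantees a compatible fine grid
da_coarse = PETSc.DMDA().create([9, 9], stencil_width=1)
da_fine = da_coarse.refine()

# interpolation maps coarse -> fine, injection maps fine -> coarse,
# just like self.interp and self.inject above
interp, _ = da_coarse.createInterpolation(da_fine)
inject = da_coarse.createInjection(da_fine)

u_coarse = da_coarse.createGlobalVec()
u_fine = da_fine.createGlobalVec()
u_coarse.set(1.0)

interp.mult(u_coarse, u_fine)   # prolong: a constant field stays constant
inject.mult(u_fine, u_coarse)   # restrict back by injection
print(u_coarse.getArray()[:4])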
pySDC | pySDC-master/pySDC/implementations/transfer_classes/TransferMesh.py | import numpy as np
import scipy.sparse as sp
import pySDC.helpers.transfer_helper as th
from pySDC.core.Errors import TransferError
from pySDC.core.SpaceTransfer import space_transfer
from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh, comp2_mesh
class mesh_to_mesh(space_transfer):
"""
Custom base_transfer class, implements Transfer.py
This implementation can restrict and prolong between n-d meshes with Dirichlet-0 or periodic boundaries
via matrix-vector products
Attributes:
Rspace: spatial restriction matrix, dim. Nc x Nf
Pspace: spatial prolongation matrix, dim. Nf x Nc
"""
def __init__(self, fine_prob, coarse_prob, params):
"""
Initialization routine
Args:
fine_prob: fine problem
coarse_prob: coarse problem
params: parameters for the transfer operators
"""
# invoke super initialization
super(mesh_to_mesh, self).__init__(fine_prob, coarse_prob, params)
if self.params.rorder % 2 != 0:
raise TransferError('Need even order for restriction')
if self.params.iorder % 2 != 0:
raise TransferError('Need even order for interpolation')
if type(self.fine_prob.nvars) is tuple:
if type(self.coarse_prob.nvars) is not tuple:
raise TransferError('nvars parameter of coarse problem needs to be a tuple')
if not len(self.fine_prob.nvars) == len(self.coarse_prob.nvars):
raise TransferError('nvars parameter of fine and coarse level needs to have the same length')
elif type(self.fine_prob.nvars) is int:
if type(self.coarse_prob.nvars) is not int:
raise TransferError('nvars parameter of coarse problem needs to be an int')
else:
raise TransferError("unknow type of nvars for transfer, got %s" % self.fine_prob.nvars)
# we have a 1d problem
if type(self.fine_prob.nvars) is int:
# if number of variables is the same on both levels, Rspace and Pspace are identity
if self.coarse_prob.nvars == self.fine_prob.nvars:
self.Rspace = sp.eye(self.coarse_prob.nvars)
self.Pspace = sp.eye(self.fine_prob.nvars)
# assemble restriction as transpose of interpolation
else:
if not self.params.periodic:
fine_grid = np.array([(i + 1) * self.fine_prob.dx for i in range(self.fine_prob.nvars)])
coarse_grid = np.array([(i + 1) * self.coarse_prob.dx for i in range(self.coarse_prob.nvars)])
else:
fine_grid = np.array([i * self.fine_prob.dx for i in range(self.fine_prob.nvars)])
coarse_grid = np.array([i * self.coarse_prob.dx for i in range(self.coarse_prob.nvars)])
self.Pspace = th.interpolation_matrix_1d(
fine_grid,
coarse_grid,
k=self.params.iorder,
periodic=self.params.periodic,
equidist_nested=self.params.equidist_nested,
)
if self.params.rorder > 0:
restr_factor = 0.5
else:
restr_factor = 1.0
if self.params.iorder == self.params.rorder:
self.Rspace = restr_factor * self.Pspace.T
else:
self.Rspace = (
restr_factor
* th.interpolation_matrix_1d(
fine_grid,
coarse_grid,
k=self.params.rorder,
periodic=self.params.periodic,
equidist_nested=self.params.equidist_nested,
).T
)
# we have an n-d problem
else:
Rspace = []
Pspace = []
for i in range(len(self.fine_prob.nvars)):
# if number of variables is the same on both levels, Rspace and Pspace are identity
if self.coarse_prob.nvars == self.fine_prob.nvars:
Rspace.append(sp.eye(self.coarse_prob.nvars[i]))
Pspace.append(sp.eye(self.fine_prob.nvars[i]))
# assemble restriction as transpose of interpolation
else:
if not self.params.periodic:
fine_grid = np.array([(j + 1) * self.fine_prob.dx for j in range(self.fine_prob.nvars[i])])
coarse_grid = np.array(
[(j + 1) * self.coarse_prob.dx for j in range(self.coarse_prob.nvars[i])]
)
else:
fine_grid = np.array([j * self.fine_prob.dx for j in range(self.fine_prob.nvars[i])])
coarse_grid = np.array([j * self.coarse_prob.dx for j in range(self.coarse_prob.nvars[i])])
Pspace.append(
th.interpolation_matrix_1d(
fine_grid,
coarse_grid,
k=self.params.iorder,
periodic=self.params.periodic,
equidist_nested=self.params.equidist_nested,
)
)
if self.params.rorder > 0:
restr_factor = 0.5
else:
restr_factor = 1.0
if self.params.iorder == self.params.rorder:
Rspace.append(restr_factor * Pspace[-1].T)
else:
mat = th.interpolation_matrix_1d(
fine_grid,
coarse_grid,
k=self.params.rorder,
periodic=self.params.periodic,
equidist_nested=self.params.equidist_nested,
).T
Rspace.append(restr_factor * mat)
# kronecker 1-d operators for n-d
self.Pspace = Pspace[0]
for i in range(1, len(Pspace)):
self.Pspace = sp.kron(self.Pspace, Pspace[i], format='csc')
self.Rspace = Rspace[0]
for i in range(1, len(Rspace)):
self.Rspace = sp.kron(self.Rspace, Rspace[i], format='csc')
def restrict(self, F):
"""
Restriction implementation
Args:
F: the fine level data (easier to access than via the fine attribute)
"""
if isinstance(F, mesh):
G = self.coarse_prob.dtype_u(self.coarse_prob.init)
if hasattr(self.fine_prob, 'ncomp'):
for i in range(self.fine_prob.ncomp):
tmpF = F[..., i].flatten()
tmpG = self.Rspace.dot(tmpF)
G[..., i] = tmpG.reshape(self.coarse_prob.nvars)
else:
tmpF = F.flatten()
tmpG = self.Rspace.dot(tmpF)
G[:] = tmpG.reshape(self.coarse_prob.nvars)
elif isinstance(F, imex_mesh):
G = self.coarse_prob.dtype_f(self.coarse_prob.init)
if hasattr(self.fine_prob, 'ncomp'):
for i in range(self.fine_prob.ncomp):
tmpF = F.impl[..., i].flatten()
tmpG = self.Rspace.dot(tmpF)
G.impl[..., i] = tmpG.reshape(self.coarse_prob.nvars)
tmpF = F.expl[..., i].flatten()
tmpG = self.Rspace.dot(tmpF)
G.expl[..., i] = tmpG.reshape(self.coarse_prob.nvars)
else:
tmpF = F.impl.flatten()
tmpG = self.Rspace.dot(tmpF)
G.impl[:] = tmpG.reshape(self.coarse_prob.nvars)
tmpF = F.expl.flatten()
tmpG = self.Rspace.dot(tmpF)
G.expl[:] = tmpG.reshape(self.coarse_prob.nvars)
elif isinstance(F, comp2_mesh):
G = self.coarse_prob.dtype_f(self.coarse_prob.init)
if hasattr(self.fine_prob, 'ncomp'):
for i in range(self.fine_prob.ncomp):
tmpF = F.comp1[..., i].flatten()
tmpG = self.Rspace.dot(tmpF)
G.comp1[..., i] = tmpG.reshape(self.coarse_prob.nvars)
tmpF = F.comp2[..., i].flatten()
tmpG = self.Rspace.dot(tmpF)
G.comp2[..., i] = tmpG.reshape(self.coarse_prob.nvars)
else:
tmpF = F.comp1.flatten()
tmpG = self.Rspace.dot(tmpF)
G.comp1[:] = tmpG.reshape(self.coarse_prob.nvars)
tmpF = F.comp2.flatten()
tmpG = self.Rspace.dot(tmpF)
G.comp2[:] = tmpG.reshape(self.coarse_prob.nvars)
else:
raise TransferError('Wrong data type for restriction, got %s' % type(F))
return G
def prolong(self, G):
"""
Prolongation implementation
Args:
G: the coarse level data (easier to access than via the coarse attribute)
"""
if isinstance(G, mesh):
F = self.fine_prob.dtype_u(self.fine_prob.init)
if hasattr(self.fine_prob, 'ncomp'):
for i in range(self.fine_prob.ncomp):
tmpG = G[..., i].flatten()
tmpF = self.Pspace.dot(tmpG)
F[..., i] = tmpF.reshape(self.fine_prob.nvars)
else:
tmpG = G.flatten()
tmpF = self.Pspace.dot(tmpG)
F[:] = tmpF.reshape(self.fine_prob.nvars)
elif isinstance(G, imex_mesh):
F = self.fine_prob.dtype_f(self.fine_prob.init)
if hasattr(self.fine_prob, 'ncomp'):
for i in range(self.fine_prob.ncomp):
tmpG = G.impl[..., i].flatten()
tmpF = self.Pspace.dot(tmpG)
F.impl[..., i] = tmpF.reshape(self.fine_prob.nvars)
tmpG = G.expl[..., i].flatten()
tmpF = self.Pspace.dot(tmpG)
F.expl[..., i] = tmpF.reshape(self.fine_prob.nvars)
else:
tmpG = G.impl.flatten()
tmpF = self.Pspace.dot(tmpG)
F.impl[:] = tmpF.reshape(self.fine_prob.nvars)
tmpG = G.expl.flatten()
tmpF = self.Pspace.dot(tmpG)
F.expl[:] = tmpF.reshape(self.fine_prob.nvars)
elif isinstance(G, comp2_mesh):
F = self.fine_prob.dtype_f(self.fine_prob.init)
if hasattr(self.fine_prob, 'ncomp'):
for i in range(self.fine_prob.ncomp):
tmpG = G.comp1[..., i].flatten()
tmpF = self.Pspace.dot(tmpG)
F.comp1[..., i] = tmpF.reshape(self.fine_prob.nvars)
tmpG = G.comp2[..., i].flatten()
tmpF = self.Pspace.dot(tmpG)
F.comp2[..., i] = tmpF.reshape(self.fine_prob.nvars)
else:
tmpG = G.comp1.flatten()
tmpF = self.Pspace.dot(tmpG)
F.comp1[:] = tmpF.reshape(self.fine_prob.nvars)
tmpG = G.comp2.flatten()
tmpF = self.Pspace.dot(tmpG)
F.comp2[:] = tmpF.reshape(self.fine_prob.nvars)
else:
raise TransferError('Wrong data type for prolongation, got %s' % type(G))
return F
| 11,661 | 44.027027 | 115 | py |
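In the n-d branch above, the per-dimension operators are combined via Kronecker products and applied to flattened fields. A small scipy sketch of that composition, using an assumed 1d linear-interpolation stencil in place of pySDC's interpolation_matrix_1d:

import numpy as np
import scipy.sparse as sp

# assumed 1d prolongation stencil from 2 to 4 periodic points
# (stand-in for th.interpolation_matrix_1d, linear interpolation with wrap-around)
P1d = sp.csc_matrix(np.array([[1.0, 0.0],
                              [0.5, 0.5],
                              [0.0, 1.0],
                              [0.5, 0.5]]))

# 2d operator as the Kronecker product of the 1d operators, as in the code above
P2d = sp.kron(P1d, P1d, format='csc')

G = np.array([[1.0, 2.0],
              [3.0, 4.0]])                     # coarse 2x2 field
F = P2d.dot(G.flatten()).reshape(4, 4)         # fine 4x4 field
print(F)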
pySDC | pySDC-master/pySDC/implementations/transfer_classes/TransferMesh_MPIFFT.py | from pySDC.core.Errors import TransferError
from pySDC.core.SpaceTransfer import space_transfer
from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh
from mpi4py_fft import PFFT, newDistArray
class fft_to_fft(space_transfer):
"""
Custom base_transfer class, implements Transfer.py
This implementation can restrict and prolong between mpi4py-fft distributed meshes with FFT for periodic boundaries
"""
def __init__(self, fine_prob, coarse_prob, params):
"""
Initialization routine
Args:
fine_prob: fine problem
coarse_prob: coarse problem
params: parameters for the transfer operators
"""
# invoke super initialization
super(fft_to_fft, self).__init__(fine_prob, coarse_prob, params)
assert self.fine_prob.spectral == self.coarse_prob.spectral
self.spectral = self.fine_prob.spectral
Nf = list(self.fine_prob.fft.global_shape())
Nc = list(self.coarse_prob.fft.global_shape())
self.ratio = [int(nf / nc) for nf, nc in zip(Nf, Nc)]
axes = tuple(range(len(Nf)))
self.fft_pad = PFFT(
self.coarse_prob.comm,
Nc,
padding=self.ratio,
axes=axes,
dtype=self.coarse_prob.fft.dtype(False),
slab=True,
)
def restrict(self, F):
"""
Restriction implementation
Args:
F: the fine level data (easier to access than via the fine attribute)
"""
if isinstance(F, mesh):
if self.spectral:
G = self.coarse_prob.dtype_u(self.coarse_prob.init)
if hasattr(self.fine_prob, 'ncomp'):
for i in range(self.fine_prob.ncomp):
tmpF = newDistArray(self.fine_prob.fft, False)
tmpF = self.fine_prob.fft.backward(F[..., i], tmpF)
tmpG = tmpF[:: int(self.ratio[0]), :: int(self.ratio[1])]
G[..., i] = self.coarse_prob.fft.forward(tmpG, G[..., i])
else:
tmpF = self.fine_prob.fft.backward(F)
tmpG = tmpF[:: int(self.ratio[0]), :: int(self.ratio[1])]
G[:] = self.coarse_prob.fft.forward(tmpG, G)
else:
G = self.coarse_prob.dtype_u(self.coarse_prob.init)
G[:] = F[:: int(self.ratio[0]), :: int(self.ratio[1])]
else:
raise TransferError('Unknown data type, got %s' % type(F))
return G
def prolong(self, G):
"""
Prolongation implementation
Args:
G: the coarse level data (easier to access than via the coarse attribute)
"""
if isinstance(G, mesh):
if self.spectral:
F = self.fine_prob.dtype_u(self.fine_prob.init)
if hasattr(self.fine_prob, 'ncomp'):
for i in range(self.fine_prob.ncomp):
tmpF = self.fft_pad.backward(G[..., i])
F[..., i] = self.fine_prob.fft.forward(tmpF, F[..., i])
else:
tmpF = self.fft_pad.backward(G)
F[:] = self.fine_prob.fft.forward(tmpF, F)
else:
F = self.fine_prob.dtype_u(self.fine_prob.init)
if hasattr(self.fine_prob, 'ncomp'):
for i in range(self.fine_prob.ncomp):
G_hat = self.coarse_prob.fft.forward(G[..., i])
F[..., i] = self.fft_pad.backward(G_hat, F[..., i])
else:
G_hat = self.coarse_prob.fft.forward(G)
F[:] = self.fft_pad.backward(G_hat, F)
elif isinstance(G, imex_mesh):
if self.spectral:
F = self.fine_prob.dtype_f(self.fine_prob.init)
if hasattr(self.fine_prob, 'ncomp'):
for i in range(self.fine_prob.ncomp):
tmpF = self.fft_pad.backward(G.impl[..., i])
F.impl[..., i] = self.fine_prob.fft.forward(tmpF, F.impl[..., i])
tmpF = self.fft_pad.backward(G.expl[..., i])
F.expl[..., i] = self.fine_prob.fft.forward(tmpF, F.expl[..., i])
else:
tmpF = self.fft_pad.backward(G.impl)
F.impl[:] = self.fine_prob.fft.forward(tmpF, F.impl)
tmpF = self.fft_pad.backward(G.expl)
F.expl[:] = self.fine_prob.fft.forward(tmpF, F.expl)
else:
F = self.fine_prob.dtype_f(self.fine_prob.init)
if hasattr(self.fine_prob, 'ncomp'):
for i in range(self.fine_prob.ncomp):
G_hat = self.coarse_prob.fft.forward(G.impl[..., i])
F.impl[..., i] = self.fft_pad.backward(G_hat, F.impl[..., i])
G_hat = self.coarse_prob.fft.forward(G.expl[..., i])
F.expl[..., i] = self.fft_pad.backward(G_hat, F.expl[..., i])
else:
G_hat = self.coarse_prob.fft.forward(G.impl)
F.impl[:] = self.fft_pad.backward(G_hat, F.impl)
G_hat = self.coarse_prob.fft.forward(G.expl)
F.expl[:] = self.fft_pad.backward(G_hat, F.expl)
else:
raise TransferError('Unknown data type, got %s' % type(G))
return F
| 5,515 | 41.430769 | 112 | py |
pySDC | pySDC-master/pySDC/implementations/transfer_classes/TransferMesh_NoCoarse.py | from pySDC.core.Errors import TransferError
from pySDC.core.SpaceTransfer import space_transfer
from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh
class mesh_to_mesh(space_transfer):
"""
Custom base_transfer class, implements Transfer.py
This implementation is a dummy transfer between identical meshes: restriction and prolongation simply
copy the data, no spatial coarsening or refinement is performed
"""
def __init__(self, fine_prob, coarse_prob, params):
"""
Initialization routine
Args:
fine_prob: fine problem
coarse_prob: coarse problem
params: parameters for the transfer operators
"""
# invoke super initialization
super(mesh_to_mesh, self).__init__(fine_prob, coarse_prob, params)
def restrict(self, F):
"""
Restriction implementation
Args:
F: the fine level data (easier to access than via the fine attribute)
"""
if isinstance(F, mesh):
G = mesh(F)
elif isinstance(F, imex_mesh):
G = imex_mesh(F)
else:
raise TransferError('Unknown data type, got %s' % type(F))
return G
def prolong(self, G):
"""
Prolongation implementation
Args:
G: the coarse level data (easier to access than via the coarse attribute)
"""
if isinstance(G, mesh):
F = mesh(G)
elif isinstance(G, imex_mesh):
F = imex_mesh(G)
else:
raise TransferError('Unknown data type, got %s' % type(G))
return F
| 1,746 | 28.610169 | 106 | py |
pySDC | pySDC-master/pySDC/implementations/transfer_classes/TransferMesh_FFT2D.py | import numpy as np
from pySDC.core.Errors import TransferError
from pySDC.core.SpaceTransfer import space_transfer
from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh
class mesh_to_mesh_fft2d(space_transfer):
"""
Custom base_transfer class, implements Transfer.py
This implementation can restrict and prolong between 2d meshes with FFT for periodic boundaries
Attributes:
ratio: ratio between the fine and coarse grid size (per dimension)
"""
def __init__(self, fine_prob, coarse_prob, params):
"""
Initialization routine
Args:
fine_prob: fine problem
coarse_prob: coarse problem
params: parameters for the transfer operators
"""
# invoke super initialization
super(mesh_to_mesh_fft2d, self).__init__(fine_prob, coarse_prob, params)
# TODO: cleanup and move to real-valued FFT
assert len(self.fine_prob.nvars) == 2
assert len(self.coarse_prob.nvars) == 2
assert self.fine_prob.nvars[0] == self.fine_prob.nvars[1]
assert self.coarse_prob.nvars[0] == self.coarse_prob.nvars[1]
self.ratio = int(self.fine_prob.nvars[0] / self.coarse_prob.nvars[0])
def restrict(self, F):
"""
Restriction implementation
Args:
F: the fine level data (easier to access than via the fine attribute)
"""
if isinstance(F, mesh):
G = mesh(self.coarse_prob.init, val=0.0)
G[:] = F[:: self.ratio, :: self.ratio]
elif isinstance(F, imex_mesh):
G = imex_mesh(self.coarse_prob.init, val=0.0)
G.impl[:] = F.impl[:: self.ratio, :: self.ratio]
G.expl[:] = F.expl[:: self.ratio, :: self.ratio]
else:
raise TransferError('Unknown data type, got %s' % type(F))
return G
def prolong(self, G):
"""
Prolongation implementation
Args:
G: the coarse level data (easier to access than via the coarse attribute)
"""
if isinstance(G, mesh):
F = mesh(self.fine_prob.init)
tmpG = np.fft.fft2(G)
tmpF = np.zeros(self.fine_prob.init[0], dtype=np.complex128)
halfG = int(self.coarse_prob.init[0][0] / 2)
tmpF[0:halfG, 0:halfG] = tmpG[0:halfG, 0:halfG]
tmpF[self.fine_prob.init[0][0] - halfG :, 0:halfG] = tmpG[halfG:, 0:halfG]
tmpF[0:halfG, self.fine_prob.init[0][0] - halfG :] = tmpG[0:halfG, halfG:]
tmpF[self.fine_prob.init[0][0] - halfG :, self.fine_prob.init[0][0] - halfG :] = tmpG[halfG:, halfG:]
F[:] = np.real(np.fft.ifft2(tmpF)) * self.ratio * 2
elif isinstance(G, imex_mesh):
F = imex_mesh(G)
tmpG_impl = np.fft.fft2(G.impl)
tmpF_impl = np.zeros(self.fine_prob.init[0], dtype=np.complex128)
halfG = int(self.coarse_prob.init[0][0] / 2)
tmpF_impl[0:halfG, 0:halfG] = tmpG_impl[0:halfG, 0:halfG]
tmpF_impl[self.fine_prob.init[0][0] - halfG :, 0:halfG] = tmpG_impl[halfG:, 0:halfG]
tmpF_impl[0:halfG, self.fine_prob.init[0][0] - halfG :] = tmpG_impl[0:halfG, halfG:]
tmpF_impl[self.fine_prob.init[0][0] - halfG :, self.fine_prob.init[0][0] - halfG :] = tmpG_impl[
halfG:, halfG:
]
F.impl[:] = np.real(np.fft.ifft2(tmpF_impl)) * self.ratio * 2
tmpG_expl = np.fft.fft2(G.expl)
tmpF_expl = np.zeros(self.fine_prob.init[0], dtype=np.complex128)
halfG = int(self.coarse_prob.init[0][0] / 2)
tmpF_expl[0:halfG, 0:halfG] = tmpG_expl[0:halfG, 0:halfG]
tmpF_expl[self.fine_prob.init[0][0] - halfG :, 0:halfG] = tmpG_expl[halfG:, 0:halfG]
tmpF_expl[0:halfG, self.fine_prob.init[0][0] - halfG :] = tmpG_expl[0:halfG, halfG:]
tmpF_expl[self.fine_prob.init[0][0] - halfG :, self.fine_prob.init[0][0] - halfG :] = tmpG_expl[
halfG:, halfG:
]
F.expl[:] = np.real(np.fft.ifft2(tmpF_expl)) * self.ratio * 2
else:
raise TransferError('Unknown data type, got %s' % type(G))
return F
| 4,322 | 42.666667 | 113 | py |
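The prolongation above works by zero-padding in Fourier space: the four quadrants of the coarse spectrum are copied into the corners of a larger spectrum before transforming back. A stand-alone numpy sketch with an assumed band-limited test field (the ratio**2 rescaling matches the unnormalized fft2/ifft2 pair):

import numpy as np

# assumed smooth, band-limited periodic test field on the coarse grid
Nc, Nf = 8, 16
ratio = Nf // Nc
x = np.arange(Nc) / Nc
G = np.sin(2 * np.pi * x)[:, None] * np.cos(2 * np.pi * x)[None, :]

tmpG = np.fft.fft2(G)
tmpF = np.zeros((Nf, Nf), dtype=np.complex128)
half = Nc // 2
tmpF[:half, :half] = tmpG[:half, :half]          # low positive frequencies
tmpF[Nf - half:, :half] = tmpG[half:, :half]     # negative frequencies, first axis
tmpF[:half, Nf - half:] = tmpG[:half, half:]     # negative frequencies, second axis
tmpF[Nf - half:, Nf - half:] = tmpG[half:, half:]

F = np.real(np.fft.ifft2(tmpF)) * ratio**2
print(F[::ratio, ::ratio] - G)   # sampling the fine field recovers the coarse one (up to roundoff)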
pySDC | pySDC-master/pySDC/implementations/transfer_classes/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/implementations/transfer_classes/TransferFenicsMesh.py | import dolfin as df
from pySDC.core.Errors import TransferError
from pySDC.core.SpaceTransfer import space_transfer
from pySDC.implementations.datatype_classes.fenics_mesh import fenics_mesh, rhs_fenics_mesh
class mesh_to_mesh_fenics(space_transfer):
"""
This implementation can restrict and prolong between fenics meshes
"""
def __init__(self, fine_prob, coarse_prob, params):
"""
Initialization routine
Args:
fine_prob: fine problem
coarse_prob: coarse problem
params: parameters for the transfer operators
"""
# invoke super initialization
super(mesh_to_mesh_fenics, self).__init__(fine_prob, coarse_prob, params)
pass
def project(self, F):
"""
Restriction implementation via projection
Args:
F: the fine level data
"""
if isinstance(F, fenics_mesh):
u_coarse = fenics_mesh(df.project(F.values, self.coarse_prob.init))
elif isinstance(F, rhs_fenics_mesh):
u_coarse = rhs_fenics_mesh(self.coarse_prob.init)
u_coarse.impl.values = df.project(F.impl.values, self.coarse_prob.init)
u_coarse.expl.values = df.project(F.expl.values, self.coarse_prob.init)
else:
raise TransferError('Unknown type of fine data, got %s' % type(F))
return u_coarse
def restrict(self, F):
"""
Restriction implementation
Args:
F: the fine level data
"""
if isinstance(F, fenics_mesh):
u_coarse = fenics_mesh(df.interpolate(F.values, self.coarse_prob.init))
elif isinstance(F, rhs_fenics_mesh):
u_coarse = rhs_fenics_mesh(self.coarse_prob.init)
u_coarse.impl.values = df.interpolate(F.impl.values, self.coarse_prob.init)
u_coarse.expl.values = df.interpolate(F.expl.values, self.coarse_prob.init)
else:
raise TransferError('Unknown type of fine data, got %s' % type(F))
return u_coarse
def prolong(self, G):
"""
Prolongation implementation
Args:
G: the coarse level data
"""
if isinstance(G, fenics_mesh):
u_fine = fenics_mesh(df.interpolate(G.values, self.fine_prob.init))
elif isinstance(G, rhs_fenics_mesh):
u_fine = rhs_fenics_mesh(self.fine_prob.init)
u_fine.impl.values = df.interpolate(G.impl.values, self.fine_prob.init)
u_fine.expl.values = df.interpolate(G.expl.values, self.fine_prob.init)
else:
raise TransferError('Unknown type of coarse data, got %s' % type(G))
return u_fine
| 2,708 | 32.444444 | 91 | py |
pySDC | pySDC-master/pySDC/implementations/transfer_classes/TransferParticles_NoCoarse.py | from pySDC.core.Errors import TransferError
from pySDC.core.SpaceTransfer import space_transfer
from pySDC.implementations.datatype_classes.particles import particles, fields, acceleration
class particles_to_particles(space_transfer):
"""
Custom transfer class, implements SpaceTransfer.py
This implementation is just a dummy for particles with no direct functionality, i.e. the number of particles is not
reduced on the coarse problem
"""
def __init__(self, fine_prob, coarse_prob, params):
"""
Initialization routine
Args:
fine_prob: fine problem
coarse_prob: coarse problem
params: parameters for the transfer operators
"""
super(particles_to_particles, self).__init__(fine_prob, coarse_prob, params)
pass
def restrict(self, F):
"""
Dummy restriction routine
Args:
F: the fine level data
"""
if isinstance(F, particles):
G = particles(F)
elif isinstance(F, fields):
G = fields(F)
elif isinstance(F, acceleration):
G = acceleration(F)
else:
raise TransferError("Unknown type of fine data, got %s" % type(F))
return G
def prolong(self, G):
"""
Dummy prolongation routine
Args:
G: the coarse level data
"""
if isinstance(G, particles):
F = particles(G)
elif isinstance(G, fields):
F = fields(G)
elif isinstance(G, acceleration):
F = acceleration(G)
else:
raise TransferError("Unknown type of coarse data, got %s" % type(G))
return F
| 1,724 | 27.278689 | 119 | py |
pySDC | pySDC-master/pySDC/implementations/transfer_classes/TransferMesh_FFT.py | import numpy as np
from pySDC.core.Errors import TransferError
from pySDC.core.SpaceTransfer import space_transfer
from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh
class mesh_to_mesh_fft(space_transfer):
"""
Custom base_transfer class, implements Transfer.py
This implementation can restrict and prolong between 1d meshes with FFT for periodic boundaries
Attributes:
ratio: ratio between the fine and coarse grid size
"""
def __init__(self, fine_prob, coarse_prob, params):
"""
Initialization routine
Args:
fine_prob: fine problem
coarse_prob: coarse problem
params: parameters for the transfer operators
"""
# invoke super initialization
super(mesh_to_mesh_fft, self).__init__(fine_prob, coarse_prob, params)
self.ratio = int(self.fine_prob.params.nvars / self.coarse_prob.params.nvars)
def restrict(self, F):
"""
Restriction implementation
Args:
F: the fine level data (easier to access than via the fine attribute)
"""
if isinstance(F, mesh):
G = mesh(self.coarse_prob.init, val=0.0)
G[:] = F[:: self.ratio]
elif isinstance(F, imex_mesh):
G = imex_mesh(self.coarse_prob.init, val=0.0)
G.impl[:] = F.impl[:: self.ratio]
G.expl[:] = F.expl[:: self.ratio]
else:
raise TransferError('Unknown data type, got %s' % type(F))
return G
def prolong(self, G):
"""
Prolongation implementation
Args:
G: the coarse level data (easier to access than via the coarse attribute)
"""
if isinstance(G, mesh):
F = mesh(self.fine_prob.init, val=0.0)
tmpG = np.fft.rfft(G)
tmpF = np.zeros(self.fine_prob.init[0] // 2 + 1, dtype=np.complex128)
halfG = int(self.coarse_prob.init[0] / 2)
tmpF[0:halfG] = tmpG[0:halfG]
tmpF[-1] = tmpG[-1]
F[:] = np.fft.irfft(tmpF) * self.ratio
elif isinstance(G, imex_mesh):
F = imex_mesh(G)
tmpG_impl = np.fft.rfft(G.impl)
tmpF_impl = np.zeros(self.fine_prob.init[0] // 2 + 1, dtype=np.complex128)
halfG = int(self.coarse_prob.init[0] / 2)
tmpF_impl[0:halfG] = tmpG_impl[0:halfG]
tmpF_impl[-1] = tmpG_impl[-1]
F.impl[:] = np.fft.irfft(tmpF_impl) * self.ratio
tmpG_expl = np.fft.rfft(G.expl)
tmpF_expl = np.zeros(self.fine_prob.init[0] // 2 + 1, dtype=np.complex128)
halfG = int(self.coarse_prob.init[0] / 2)
tmpF_expl[0:halfG] = tmpG_expl[0:halfG]
tmpF_expl[-1] = tmpG_expl[-1]
F.expl[:] = np.fft.irfft(tmpF_expl) * self.ratio
else:
raise TransferError('Unknown data type, got %s' % type(G))
return F
| 3,062 | 35.903614 | 99 | py |
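The 1d variant does the same zero-padding with the real-valued FFT: the low coarse modes are copied into a larger half-spectrum and the result is rescaled by the coarsening ratio. A stand-alone numpy sketch with an assumed band-limited periodic signal:

import numpy as np

# assumed band-limited periodic test signal on the coarse grid
Nc, Nf = 8, 16
ratio = Nf // Nc
x = np.arange(Nc) / Nc
G = np.sin(2 * np.pi * x) + 0.5 * np.cos(4 * np.pi * x)

tmpG = np.fft.rfft(G)                                   # Nc//2 + 1 coefficients
tmpF = np.zeros(Nf // 2 + 1, dtype=np.complex128)
tmpF[: Nc // 2] = tmpG[: Nc // 2]                       # copy the low modes
F = np.fft.irfft(tmpF) * ratio                          # rescale for the unnormalized pair

print(F[::ratio] - G)    # sampling the fine signal recovers the coarse one (up to roundoff)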
pySDC | pySDC-master/docs/convert_markdown.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 17 19:47:56 2023
@author: telu
"""
import os
import glob
import json
import m2r2
import shutil
import numpy as np
mdFiles = ['README.md', 'CONTRIBUTING.md', 'CHANGELOG.md', 'CODE_OF_CONDUCT.md', 'docs/contrib']
docSources = 'docs/source'
# Move already images in the future build directory
os.makedirs('docs/build/html/_images/', exist_ok=True)
shutil.copytree('docs/img', 'docs/build/html/_images/docs/img', dirs_exist_ok=True)
counter = np.array(0)
with open('docs/emojis.json') as f:
emojis = set(json.load(f).keys())
def wrappEmojis(rst):
for emoji in emojis:
rst = rst.replace(emoji, f'|{emoji}|')
return rst
def addSectionRefs(rst, baseName):
sections = {}
lines = rst.splitlines()
# Search for sections in rst file
for i in range(len(lines) - 2):
conds = [
len(lines[i + 1]) and lines[i + 1][0] in ['=', '-', '^', '"'],
lines[i + 2] == lines[i - 1] == '',
len(lines[i]) == len(lines[i + 1]),
]
if all(conds):
sections[i] = lines[i]
# Add unique references before each section
for i, title in sections.items():
ref = '-'.join([elt for elt in title.lower().split(' ') if elt != ''])
for char in ['#', "'", '^', '°', '!']:
ref = ref.replace(char, '')
ref = f'{baseName}/{ref}'
lines[i] = f'.. _{ref}:\n\n' + lines[i]
# Returns all concatenated lines
return '\n'.join(lines)
def completeRefLinks(rst, baseName):
i = 0
while i != -1:
i = rst.find(':ref:`', i)
if i != -1:
iLink = rst.find('<', i)
rst = rst[: iLink + 1] + f'{baseName}/' + rst[iLink + 1 :]
i += 6
return rst
def addOrphanTag(rst):
return '\n:orphan:\n' + rst
def setImgPath(rst):
i = 0
while i != -1:
i = rst.find('<img src=".', i)
if i != -1:
rst = rst[: i + 11] + '/_images' + rst[i + 11 :]
i += 16
return rst
def linkReadmeToIndex(rst):
return rst.replace('<./README>', '<./index>')
def convert(md, orphan=False, sectionRefs=True):
baseName = os.path.splitext(md)[0]
rst = m2r2.parse_from_file(md, parse_relative_links=True)
rst = wrappEmojis(rst)
if sectionRefs:
rst = addSectionRefs(rst, baseName)
rst = completeRefLinks(rst, baseName)
if orphan:
rst = addOrphanTag(rst)
rst = setImgPath(rst)
rst = linkReadmeToIndex(rst)
with open(f'{docSources}/{baseName}.rst', 'w') as f:
f.write(rst)
print(f'Converted {md} to {docSources}/{baseName}.rst')
for md in mdFiles:
if os.path.isfile(md):
isNotMain = md != 'README.md'
convert(md, orphan=isNotMain, sectionRefs=isNotMain)
elif os.path.isdir(md):
os.makedirs(f'{docSources}/{md}', exist_ok=True)
for f in glob.glob(f'{md}/*.md'):
convert(f, orphan=True)
else:
raise ValueError(f'{md} is not a md file or a folder')
| 3,045 | 25.955752 | 96 | py |
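The addSectionRefs pass detects an RST title as a line followed by an underline of equal length built from '=', '-', '^' or '"'. A simplified stand-alone illustration of that detection (the real function additionally checks the blank lines around the title; the README baseName is an assumed example):

# simplified stand-alone illustration of the title detection in addSectionRefs
rst = "Intro text.\n\nGetting started\n===============\n\nSome content.\n"
lines = rst.splitlines()

for i in range(len(lines) - 1):
    below = lines[i + 1]
    if below and below[0] in ('=', '-', '^', '"') and len(below) == len(lines[i]):
        ref = '-'.join(part for part in lines[i].lower().split(' ') if part)
        print(f".. _README/{ref}:", '->', lines[i])   # README is an assumed baseName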
pySDC | pySDC-master/docs/source/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pySDC documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 11 15:58:40 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../../../'))
sys.path.insert(0, os.path.abspath('../../pySDC'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinxemoji.sphinxemoji',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pySDC'
copyright = '2023, Robert Speck'
author = 'Robert Speck, Thibaut Lunet, Thomas Baumann, Lisa Wimmer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '5.2'
# The full version, including alpha/beta/rc tags.
release = '5.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
add_module_names = False
toc_object_entries_show_parents = 'hide'
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
suppress_warnings = ['image.nonlocal_uri']
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'classic'
# Activate the bootstrap theme.
# html_theme = 'bootstrap'
# html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'pySDC v2'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'custom.css',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pySDCdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pySDC.tex', 'pySDC Documentation', 'Robert Speck', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'pysdc', 'pySDC Documentation', [author], 1)]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pySDC', 'pySDC Documentation', author, 'pySDC', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
autodoc_mock_imports = ['dolfin', 'mpi4py', 'petsc4py', 'mpi4py_fft', 'cupy']
| 10,415 | 28.258427 | 119 | py |
SGR | SGR-main/sgr_main.py | import os
from arg_parser import parse_args
from sgr.sgr import SGR
def main():
params = parse_args()
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, params.neat_config)
pop = SGR(
config_path,
params.robot_size,
params.spec_genotype_weight,
params.spec_phenotype_weight,
params.pop_size,
params.substrate_type,
params.save_to
)
pop.run(
params.env,
params.steps,
params.gens,
params.cpu,
params.max_stag,
params.save_gen_interval
)
if __name__ == "__main__":
main() | 634 | 20.166667 | 61 | py |
SGR | SGR-main/multiple_env_hyperneat.py |
import os
import numpy as np
import sys
from typing import Dict
from pathos.multiprocessing import ProcessPool
from evogym import get_full_connectivity
import evogym.envs
from sgr.custom_reporter import CustomReporter, remove_reporters
from arg_parser import parse_args
from sgr.evogym_sim import get_obs_size
from sgr.generate_robot import eval_robot_constraint, N_TYPES
import os
from sgr.sgr import SGR
from dynamic_env.env_config import EnvConfig
import dill
N_ENVIRONMENTS = 6
def create_child(parent, rng, height_mutation_chance):
seed = rng.integers(100)
child = parent.create_child(seed)
child.mutate_barrier_h(height_mutation_chance)
return child
def main():
params = parse_args()
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, params.neat_config)
pop = SGR(
config_path,
params.robot_size,
params.spec_genotype_weight,
params.spec_phenotype_weight,
params.pop_size,
params.substrate_type,
# params.save_to,
reporters=True
)
seed = np.random.SeedSequence()
rng = np.random.default_rng(seed)
base_env = EnvConfig(seed)
child_1 = create_child(base_env, rng, params.height_mutation_chance)
child_2 = create_child(base_env, rng, params.height_mutation_chance)
child_3 = create_child(child_1, rng, params.height_mutation_chance)
child_4 = create_child(child_2, rng, params.height_mutation_chance)
child_5 = create_child(child_3, rng, params.height_mutation_chance)
env_bag = [base_env, child_1, child_2, child_3, child_4, child_5]
# for _ in range(1, N_ENVIRONMENTS):
# parent_env = env_bag[rng.integers(0, len(env_bag))]
# seed = rng.integers(100)
# child = parent_env.create_child(seed)
# child.mutate_barrier_h(params.height_mutation_chance)
# print(parent_env.id, child.id, child.heights_list)
# env_bag.append(child)
for gen in range(1, params.gens//params.p_transfer_gens):
env_order: list[EnvConfig] = rng.choice(env_bag, len(env_bag), replace=False)
print("##################### Starting gen ", gen, "#####################")
print("Curriculum: ", [env.id for env in env_order])
for env in env_order:
print("\nTraining on env: ", env.id)
pop.run(
env_name="dynamic",
n_steps=params.steps,
n_gens=params.p_transfer_gens,
cpus=params.cpu,
max_stagnation=params.max_stag,
save_gen_interval=params.save_gen_interval,
print_results=False,
dynamic_env_config=env,
)
if gen % params.save_gen_interval == 0:
dill.dump(pop, open(f"{params.save_to}_pop_gen_{gen}.pkl", mode='wb'))
if __name__ == "__main__":
main() | 2,869 | 33.578313 | 85 | py |
SGR | SGR-main/poet_test.py | from poet.poet import POET
from arg_parser import parse_args
from sgr.sgr import SGR
import os
import numpy as np
def main():
params = parse_args()
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, params.neat_config)
seed = np.random.SeedSequence()
poet_alg = POET(
seed,
params,
config_path
)
poet_alg.run(params.gens)
if __name__ == "__main__":
main() | 451 | 17.833333 | 61 | py |
SGR | SGR-main/arg_parser.py | import argparse
import json
import os
def default_values():
default = {
"gens": 250,
"robot_size": 5,
"steps": 400,
"env": "dynamic", # env_names = ["CaveCrawler-v0", "UpStepper-v0", "ObstacleTraverser-v0"]
"n_threads": 4,
"save_to": "",
"goal_fit": 10,
"pop_size": 32,
"max_stag": 10000,
"neat_config": "neat_configs/hyperNEAT.cfg",
"save_gen_interval": 20,
"spec_genotype_weight": .8,
"spec_phenotype_weight": 5,
"substrate_type": "cppn",
# used for POET, not required
"height_mutation_chance": 0.35,
"max_height_mutation": 1,
"obs_prob_mutation_power": 2,
"reproduction_criterion": 1,
"difficulty_criterion_low": .5,
"difficulty_criterion_high": 8,
"num_create_environments": 10,
"num_children_add": 2,
"max_pair_population_size": 20,
"n_nearest_neighbors": 5,
"p_transfer_gens": 1,
"create_frequency": 49,
"p_transfer_frequency": 10,
"d_transfer_frequency": 27,
}
return default
def create_parser(default_args):
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", nargs="?", default="", help="", type=str)
parser.add_argument("-g", "--gens", nargs="?", default=default_args["gens"], help="", type=int)
parser.add_argument("-r", "--robot_size", nargs="?", default=default_args["robot_size"], help="", type=int)
parser.add_argument("-s", "--steps", nargs="?", default=default_args["steps"], help="", type=int)
parser.add_argument("-t", "--cpu", nargs="?", default=default_args["n_threads"], help="", type=int)
parser.add_argument("-e", "--env", nargs="?", default=default_args["env"], help="", type=str)
parser.add_argument("--save_to", nargs="?", default=default_args["save_to"], help="", type=str)
parser.add_argument("--goal_fit", nargs="?", default=default_args["goal_fit"], help="", type=float)
parser.add_argument("--pop", nargs="?", default=default_args["pop_size"], help="", type=int)
parser.add_argument("--max_stag", nargs="?", default=default_args["max_stag"], help="", type=int)
parser.add_argument("--neat_config", nargs="?", default=default_args["neat_config"], help="", type=str)
parser.add_argument("--save_gen_interval", nargs="?", default=default_args["save_gen_interval"], help="", type=int)
parser.add_argument("--spec_genotype_weight", nargs="?", default=default_args["spec_genotype_weight"], help="", type=float)
parser.add_argument("--spec_phenotype_weight", nargs="?", default=default_args["spec_phenotype_weight"], help="", type=float)
parser.add_argument("--substrate", nargs="?", default=default_args["substrate_type"], help="", type=str)
parser.add_argument("--height_mutation_chance", nargs="?", default=default_args["height_mutation_chance"], help="", type=float)
parser.add_argument("--max_height_mutation", nargs="?", default=default_args["max_height_mutation"], help="", type=int)
parser.add_argument("--obs_prob_mutation_power", nargs="?", default=default_args["obs_prob_mutation_power"], help="", type=float)
parser.add_argument("--create_frequency", nargs="?", default=default_args["create_frequency"], help="", type=int)
parser.add_argument("--reproduction_criterion", nargs="?", default=default_args["reproduction_criterion"], help="", type=float)
parser.add_argument("--difficulty_criterion_low", nargs="?", default=default_args["difficulty_criterion_low"], help="", type=float)
parser.add_argument("--difficulty_criterion_high", nargs="?", default=default_args["difficulty_criterion_high"], help="", type=float)
parser.add_argument("--num_create_environments", nargs="?", default=default_args["num_create_environments"], help="", type=int)
parser.add_argument("--num_children_add", nargs="?", default=default_args["num_children_add"], help="", type=int)
parser.add_argument("--max_pair_population_size", nargs="?", default=default_args["max_pair_population_size"], help="", type=int)
parser.add_argument("--n_nearest_neighbors", nargs="?", default=default_args["n_nearest_neighbors"], help="", type=int)
parser.add_argument("--p_transfer_gens", nargs="?", default=default_args["p_transfer_gens"], help="", type=int)
parser.add_argument("--p_transfer_frequency", nargs="?", default=default_args["p_transfer_frequency"], help="", type=int)
parser.add_argument("--d_transfer_frequency", nargs="?", default=default_args["d_transfer_frequency"], help="", type=int)
return parser
def parse_args():
args_dict = {}
default_args = default_values()
# parse once just to update the default values in case a config file was given
parser = create_parser(default_args)
command_line_args = parser.parse_args()
if command_line_args.config != "":
local_dir = os.path.dirname(__file__)
path = os.path.join(local_dir, command_line_args.config)
with open(path, 'r', encoding='utf-8') as f:
file_args = json.load(f)
for k, v in file_args.items():
print(k, v)
default_args[k] = v
# "real" parser to get the values from the command line that have priority over the
# config file
parser = create_parser(default_args)
command_line_args = parser.parse_args()
args_dict["gens"] = command_line_args.gens
args_dict["robot_size"] = command_line_args.robot_size
args_dict["steps"] = command_line_args.steps
args_dict["env"] = command_line_args.env
args_dict["cpu"] = command_line_args.cpu
args_dict["save_to"] = command_line_args.save_to
args_dict["goal_fit"] = command_line_args.goal_fit
args_dict["pop_size"] = command_line_args.pop
args_dict["max_stag"] = command_line_args.max_stag
args_dict["neat_config"] = command_line_args.neat_config
args_dict["save_gen_interval"] = command_line_args.save_gen_interval
args_dict["spec_genotype_weight"] = command_line_args.spec_genotype_weight
args_dict["spec_phenotype_weight"] = command_line_args.spec_phenotype_weight
args_dict["substrate_type"] = command_line_args.substrate
args_dict["height_mutation_chance"] = command_line_args.height_mutation_chance
args_dict["max_height_mutation"] = command_line_args.max_height_mutation
args_dict["obs_prob_mutation_power"] = command_line_args.obs_prob_mutation_power
args_dict["create_frequency"] = command_line_args.create_frequency
args_dict["reproduction_criterion"] = command_line_args.reproduction_criterion
args_dict["difficulty_criterion_low"] = command_line_args.difficulty_criterion_low
args_dict["difficulty_criterion_high"] = command_line_args.difficulty_criterion_high
args_dict["num_create_environments"] = command_line_args.num_create_environments
args_dict["num_children_add"] = command_line_args.num_children_add
args_dict["max_pair_population_size"] = command_line_args.max_pair_population_size
args_dict["n_nearest_neighbors"] = command_line_args.n_nearest_neighbors
args_dict["p_transfer_gens"] = command_line_args.p_transfer_gens
args_dict["p_transfer_frequency"] = command_line_args.p_transfer_frequency
args_dict["d_transfer_frequency"] = command_line_args.d_transfer_frequency
return Parameters(args_dict)
class Parameters:
def __init__(self, args_dict):
self.gens = args_dict["gens"]
self.robot_size = args_dict["robot_size"]
self.steps = args_dict["steps"]
self.env = args_dict["env"]
self.cpu = args_dict["cpu"]
self.save_to = args_dict["save_to"]
self.goal_fit = args_dict["goal_fit"]
self.pop_size = args_dict["pop_size"]
self.max_stag = args_dict["max_stag"]
self.neat_config = args_dict["neat_config"]
self.save_gen_interval = args_dict["save_gen_interval"]
self.spec_genotype_weight = args_dict["spec_genotype_weight"]
self.spec_phenotype_weight = args_dict["spec_phenotype_weight"]
self.substrate_type = args_dict["substrate_type"]
self.p_transfer_gens = args_dict["p_transfer_gens"]
self.height_mutation_chance = args_dict["height_mutation_chance"]
self.max_height_mutation = args_dict["max_height_mutation"]
self.obs_prob_mutation_power = args_dict["obs_prob_mutation_power"]
self.create_frequency = args_dict["create_frequency"]
self.reproduction_criterion = args_dict["reproduction_criterion"]
self.difficulty_criterion_low = args_dict["difficulty_criterion_low"]
self.difficulty_criterion_high = args_dict["difficulty_criterion_high"]
self.num_create_environments = args_dict["num_create_environments"]
self.num_children_add = args_dict["num_children_add"]
self.max_pair_population_size = args_dict["max_pair_population_size"]
self.n_nearest_neighbors = args_dict["n_nearest_neighbors"]
self.p_transfer_frequency = args_dict["p_transfer_frequency"]
self.d_transfer_frequency = args_dict["d_transfer_frequency"]
        # Print the parsed parameters for logging
for k, v in args_dict.items():
print(f"{k}: {v}")
print()
| 9,264 | 54.14881 | 137 | py |
SGR | SGR-main/poet/poet.py | from time import time
from typing import List
import numpy as np
from copy import deepcopy
import pickle
from dynamic_env.env_config import EnvConfig
from sgr.sgr import SGR
from arg_parser import Parameters
import pathlib
RESULTS_DIR = "checkpoints"
class Pair:
""" A POET pair consisting of an environment and an agent. """
def __init__(self, seed):
self.environment: EnvConfig = None
self.agent_pop: SGR = None
self.fitness: float = None
self.seed: int = seed
self.dir_path = None
self.csv = None
def init_first(self, params: Parameters, config_path, save_to=None):
self.environment = EnvConfig(seed = self.seed)
self.agent_pop = SGR(
config_path,
params.robot_size,
params.spec_genotype_weight,
params.spec_phenotype_weight,
params.pop_size,
params.substrate_type,
reporters=True
)
    def add_reporter(self, save_to):
dir_path = f"{RESULTS_DIR}/{save_to}/"
pathlib.Path(dir_path).mkdir(parents=True, exist_ok=True)
csv_file = f"{dir_path}/env_{self.environment.id}_results.csv"
self.csv = open(csv_file, "w+")
self.csv.write("global_gen;pop_gen;pop_id;best_fit;num_species\n")
class POET:
def __init__(self, seed: int, params: Parameters, config_path):
self.seed = seed
self.rng = np.random.default_rng(seed)
# Parameters
self.height_mutation_chance = params.height_mutation_chance
self.max_height_mutation = params.max_height_mutation
self.obs_prob_mutation_power = params.obs_prob_mutation_power
self.create_frequency = params.create_frequency
self.reproduction_criterion = params.reproduction_criterion
self.difficulty_criterion_low = params.difficulty_criterion_low
self.difficulty_criterion_high = params.difficulty_criterion_high
self.num_create_environments = params.num_create_environments
self.num_children_add = params.num_children_add
self.max_pair_population_size = params.max_pair_population_size
self.n_nearest_neighbors = params.n_nearest_neighbors
self.p_transfer_frequency = params.p_transfer_frequency
self.d_transfer_frequency = params.d_transfer_frequency
# The pairs of environments and agents
self.pairs: List[Pair] = []
self.run_params = params
self.config_path = config_path
first_pair = Pair(self.rng.integers(100))
first_pair.init_first(params, config_path)
if self.run_params.save_to != "":
first_pair.add_reporter(self.run_params.save_to)
self.pairs.append(first_pair)
# The archive with all environments that have ever existed in the pair population
self.environment_archive = []
self.environment_archive.append(first_pair.environment)
self.total_environments_created = 1
def run(self, generations):
for i in range(1, generations):
print("##################### Starting POET gen ", i, "#####################")
print(f"Evaluating {len(self.pairs)} pairs\n")
gen_start_time = time()
# Transfers
if i%self.p_transfer_frequency == 0:
print("\n=== Starting proposal transfer process ===\n")
self.proposal_transfer()
print(f"Transfer took {time()-gen_start_time}s\n")
if i % self.d_transfer_frequency == 0:
d_transfer_time = time()
print("\n=== Starting direct transfer process ===\n")
self.direct_transfer()
print(f"Transfer took {time()-d_transfer_time}s\n")
# Create new environments
if i%self.create_frequency == 0:
env_creation_t = time()
print("\n=== Creating new environments ===\n")
self.create_environments()
print(f"Env creation took {time()-env_creation_t}s\n")
# Train
print("\n=== Population training ===")
# n_steps = int(self.run_params.steps * (self.rng.integers(8, 12)/10))
n_steps = self.run_params.steps
print("Steps: ", n_steps, "\n")
self.train_agents(n_steps, i)
# Create checkpoint
if i%self.run_params.save_gen_interval == 0 and self.run_params.save_to != "":
self.save_checkpoint(i)
print(f"\nPOET generation took {time()-gen_start_time}s\n")
for p in self.pairs:
if p.csv is not None:
p.csv.close()
def save_checkpoint(self, gen):
temp_csvs = {}
for p in self.pairs:
temp_csvs[p.environment.id] = p.csv
p.csv = None
path = f"{RESULTS_DIR}/{self.run_params.save_to}/cp_{gen}.pkl"
f = open(path, "wb")
pickle.dump(self, f)
f.close()
for p in self.pairs:
p.csv = temp_csvs[p.environment.id]
def create_environments(self):
# Find eligible pairs
eligible_pairs = []
for pair in self.pairs:
if (pair.fitness is not None) and (pair.fitness > self.reproduction_criterion):
eligible_pairs.append(pair)
print("Eligible pairs to reproduce: ", len(eligible_pairs))
# Create child environments
child_pairs: List[Pair]= []
if len(eligible_pairs) > 0:
            selected_pairs = self.rng.choice(eligible_pairs, self.num_create_environments, replace=True)
for pair in selected_pairs:
new_pair = Pair(self.rng.integers(100))
new_pair.environment = self.mutate(pair.environment)
new_pair.agent_pop = pair.agent_pop.create_child()
child_pairs.append(new_pair)
# Find agents for the children and test them against the minimal criteria
eligible_child_pairs = []
for child_pair in child_pairs:
self.evaluate_pair(child_pair)
print("Env created with fitness of: ", child_pair.fitness)
if self.difficulty_criterion_low <= child_pair.fitness <= self.difficulty_criterion_high:
eligible_child_pairs.append(child_pair)
# Select child environments to add to pair population
sorted_child_pairs = self.sort_child_pairs(eligible_child_pairs)
print("Eligible envs: ", len(sorted_child_pairs))
added = 0
for child in sorted_child_pairs:
if added < self.num_children_add:
child.agent_pop.add_reporters()
if self.run_params.save_to != "":
child.add_reporter(self.run_params.save_to)
self.pairs.append(child)
self.environment_archive.append(child.environment)
if len(self.pairs) > self.max_pair_population_size:
self.pairs.pop(0)
added += 1
def mutate(self, env: EnvConfig):
seed = self.rng.integers(100)
child = env.create_child(seed)
child.mutate_barrier_h(self.height_mutation_chance)
self.total_environments_created += 1
return child
    # Unlike train_agents, this evaluates a pair for a given (usually small) number of generations, one by default
def evaluate_pair(self, pair: Pair, print_par_name = False, gens = 1):
pop = pair.agent_pop
env = pair.environment
if print_par_name:
print(f"----- Env {pair.environment.id}, Pop {pair.agent_pop.id} -----")
winner = pop.run(
env_name = self.run_params.env,
n_steps = self.run_params.steps,
n_gens = gens,
cpus = self.run_params.cpu,
max_stagnation = self.run_params.max_stag,
save_gen_interval = self.run_params.save_gen_interval,
print_results = False,
dynamic_env_config=env,
)
# Set fitness
pair.fitness = winner.fitness
return pair.fitness
def sort_child_pairs(self, pairs: List[Pair]):
# Remove already existing environments
pruned_pairs = []
for pair in pairs:
if(not self.is_in_archive(pair.environment)):
pruned_pairs.append(pair)
# Compute novelty for the children
novelties = []
for pair in pruned_pairs:
novelties.append(self.compute_novelty(pair.environment))
# Sort children based on novelty
sorted_pairs = []
for i in range(len(novelties)):
index = novelties.index(max(novelties))
sorted_pairs.append(pruned_pairs.pop(index))
novelties.pop(index)
return sorted_pairs
def is_in_archive(self, env):
# Check if the environment already exists in the archive
for environment in self.environment_archive:
if self.compare_envs(environment, env) == 0:
return True
return False
def compute_novelty(self, env):
# Compute the novelty of an environment with regards to the archive
# Novelty is the mean difference from the 5 nearest neighbours
differences = []
for environment in self.environment_archive:
differences.append(self.compare_envs(environment, env))
novelty = 0
k = self.n_nearest_neighbors
if len(differences) < k:
k = len(differences)
for i in range(k):
novelty_i = min(differences)
differences.pop(differences.index(novelty_i))
novelty += novelty_i/k
return novelty
def compare_envs(self, env1: EnvConfig, env2: EnvConfig):
# Find the difference between two environments
d_list = env1.heights_list - env2.heights_list
acc = 0
for d in d_list:
            acc += abs(d)
return acc
def train_agents(self, n_steps, gen):
for pair in self.pairs:
print(f"----------------- Env {pair.environment.id}, Pop {pair.agent_pop.id} -----------------")
# Set environments
pop = pair.agent_pop
env = pair.environment
winner = pop.run(
env_name = self.run_params.env,
n_steps = self.run_params.steps,
n_gens = 1,
cpus = self.run_params.cpu,
max_stagnation = self.run_params.max_stag,
save_gen_interval = self.run_params.save_gen_interval,
print_results=False,
dynamic_env_config=env,
)
# Set fitness
pair.fitness = winner.fitness
print("Pair fitness: ", np.round(pair.fitness, 4), "\n")
if pair.csv is not None:
text = f"{gen};{pop.pop.generation};{pop.id};{winner.fitness};{len(pop.pop.species.species)}\n"
pair.csv.write(text)
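    # Proposal transfer: pick one pair at random and let every other agent population
    # train for p_transfer_gens generations on that pair's environment. The populations
    # are trained in place, but no pair's population assignment is replaced here.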
def proposal_transfer(self):
if len(self.pairs) > 1:
base_pairs = self.rng.choice(self.pairs, 1, replace=True)
for pair in base_pairs:
for transfer_pair in self.pairs:
if transfer_pair.agent_pop.id != pair.agent_pop.id:
transfer_pair.agent_pop.pop.best_genome = None
temp_test_pair = Pair(self.rng.integers(100))
temp_test_pair.environment = pair.environment
temp_test_pair.agent_pop = transfer_pair.agent_pop
_ = self.evaluate_pair(temp_test_pair, True, gens = self.run_params.p_transfer_gens)
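    # Direct transfer: for every pair, evaluate a deep copy of each other agent population
    # on this pair's environment for one generation, and adopt the best copy if it beats
    # the pair's current fitness.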
def direct_transfer(self):
# Direct transfer
if len(self.pairs) >= 1:
for pair in self.pairs:
best_agent_pop = None
best_fitness = -1000000
for transfer_pair in self.pairs:
if transfer_pair.agent_pop.id != pair.agent_pop.id:
temp_test_pair = Pair(self.rng.integers(100))
temp_test_pair.environment = pair.environment
temp_test_pair.agent_pop = deepcopy(transfer_pair.agent_pop)
fitness = self.evaluate_pair(temp_test_pair, True, gens = 1)
if best_fitness < fitness:
best_agent_pop = temp_test_pair.agent_pop
best_fitness = fitness
if best_fitness > pair.fitness:
pair.agent_pop = best_agent_pop
pair.fitness = best_fitness
def proposal_transfer_strictly_better(self):
if len(self.pairs) > 1:
base_pairs = self.rng.choice(self.pairs, 1, replace=True)
for pair in base_pairs:
for transfer_pair in self.pairs:
if transfer_pair.agent_pop.id != pair.agent_pop.id:
test_pair = Pair(self.rng.integers(100))
test_pair.environment = pair.environment
test_pair.agent_pop = transfer_pair.agent_pop.create_child()
_ = self.evaluate_pair(test_pair, True, gens = self.run_params.p_transfer_gens)
test_pair.environment = transfer_pair.environment
fit = self.evaluate_pair(test_pair, True, gens = 1)
if fit >= transfer_pair.fitness:
print(f"Successfull strictly better transfer: {transfer_pair.agent_pop.id} became {test_pair.agent_pop.id} by training on env {test_pair.environment.id}")
test_pair.agent_pop.add_reporters()
transfer_pair.agent_pop = test_pair.agent_pop | 13,885 | 41.206687 | 182 | py |
SGR | SGR-main/poet/__init__.py | 0 | 0 | 0 | py |
|
SGR | SGR-main/baseline_algs/single_genome_neat.py | import neat
import os
import numpy as np
import errno
import dill
import neat
import time
import neat.nn
import pathlib
import sys
from typing import Dict
from pathos.multiprocessing import ProcessPool
from evogym import get_full_connectivity
import evogym.envs
sys.path.append('../')
from sgr.custom_reporter import CustomReporter, remove_reporters
from alt_arg_parser import parse_args
from sgr.evogym_sim import get_obs_size
from sgr.generate_robot import eval_robot_constraint, N_TYPES
BEST_FIT = -10000
STAG = 0
POPULATION = None
OBS_SIZE = 0
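# A single network both designs and controls the robot: during design it is queried once
# per voxel with the voxel's offset from the centre plus a constant padding vector, and
# the first len(N_TYPES) outputs select the voxel type; the remaining outputs are used as
# actuator signals during simulation (see simulate_env below).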
def generate_robot(net, robot_size=5, pad=0):
    global OBS_SIZE
    robot = np.ones((robot_size, robot_size))
    # Build the constant observation padding once instead of overwriting the `pad`
    # argument (and shadowing the builtin `input`) inside the loop.
    pad_values = np.full(OBS_SIZE, pad)
    for i in range(robot_size):
        for j in range(robot_size):
            coords = (i - (robot_size // 2),
                      j - (robot_size // 2))
            full_input = np.concatenate((coords, pad_values))
            graph_out = net.activate(full_input)
            node = np.argmax(graph_out[:len(N_TYPES)])
            robot[i][j] = node
    return robot
def simulate_env(robot, net, config, render = False):
connections = get_full_connectivity(robot)
env = evogym.envs.gym.make(config["env"], body=robot, connections=connections)
reward = 0
obs = env.reset()
actuators = env.get_actuator_indices("robot")
global OBS_SIZE
pre_pad = np.full(2, config["pad"])
pos_pad = np.full(OBS_SIZE - len(obs), config["pad"])
for _ in range(config["steps"]):
if render:
env.render('screen')
input = np.concatenate((pre_pad, obs, pos_pad))
action_by_actuator = net.activate(input)[len(N_TYPES):]
action = np.array([action_by_actuator[i] for i in actuators])
obs, r, done, _ = env.step(action)
reward += r
if done:
return reward, True
env.close()
return reward, False
def fit_func_thread(genomes, params, neat_config, render=False, save_gif=False ):
results_dict = {}
for g_id, genome in genomes:
net = neat.nn.FeedForwardNetwork.create(genome, neat_config)
robot = generate_robot(net, params["robot_size"], params["pad"])
if not eval_robot_constraint(robot):
results_dict[g_id] = -10000
continue
reward, _ = simulate_env(robot, net, params)
results_dict[g_id] = reward
return results_dict
def fit_func(genomes, neat_config, params):
global BEST_FIT, STAG, POPULATION
STAG += 1
start_t = time.time()
try:
pool = ProcessPool(nodes=params["cpu"])
results_map = pool.amap(
fit_func_thread,
np.array_split(genomes, params["cpu"]),
[params for _ in range(params["cpu"])],
[neat_config for _ in range(params["cpu"])],
)
results = results_map.get(timeout=15*60)
fitness_dict = {}
for result_dict in results:
for k, v in result_dict.items():
fitness_dict[k] = v
surviving_genomes = {}
for g_id, genome in genomes:
genome.fitness = fitness_dict[g_id]
if genome.fitness > BEST_FIT:
BEST_FIT = genome.fitness
STAG = 0
if genome.fitness > -10000:
surviving_genomes[g_id] = genome
POPULATION.population = surviving_genomes
except IOError as e: # Sometimes the environment just implodes
if e.errno == errno.EPIPE:
print("Problem with broken pipe")
else:
raise(IOError)
print("Simulation took ", time.time()-start_t, "s")
print("STAGNATION: ", STAG)
if STAG > params["max_stag"]:
print("!!!!!!!!!!!!!!!!!!!!! POPULATION STAGNATED !!!!!!!!!!!!!!!!!!!")
if params["save_to"] is not "":
dill.dump(genomes, open(params["save_to"] + "_genomes.pkl", mode='wb'))
remove_reporters(POPULATION)
dill.dump(POPULATION, open(params["save_to"] + "_pop.pkl", mode='wb'))
exit()
def main():
global POPULATION, OBS_SIZE
params = parse_args()
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, params["controller_config"])
neat_config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction, neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path)
neat_config.pop_size = params["pop_size"]
robot = np.full((params["robot_size"], params["robot_size"]), 4)
OBS_SIZE = get_obs_size(robot, params["env"])
in_size = 2 + OBS_SIZE
out_size = len(N_TYPES) + params["robot_size"]**2
neat_config.genome_config.num_inputs = in_size
neat_config.genome_config.input_keys = [-1*i for i in range(1, in_size+1)]
neat_config.genome_config.num_outputs = out_size
neat_config.genome_config.output_keys = [i for i in range(1, out_size+1)]
pop = neat.Population(neat_config)
POPULATION = pop
stats = neat.StatisticsReporter()
pop.add_reporter(stats)
if params["save_to"] is not "":
pathlib.Path("/".join(params["save_to"].split("/")[:-1])).mkdir(parents=True, exist_ok=True)
pop.add_reporter(CustomReporter(True, params["save_to"] + "_out.txt", params["save_to"] + "_table.csv"))
pop.add_reporter(neat.StdOutReporter(True))
f = lambda genomes, config: fit_func(genomes, config, params)
winner = pop.run(f, params["gens"])
print('\nBest genome:\n{!s}'.format(winner))
if params["save_to"] is not "":
remove_reporters(pop)
dill.dump(pop, open(params["save_to"] + "_pop.pkl", mode='wb'))
if __name__ == "__main__":
main() | 5,678 | 31.82659 | 136 | py |
SGR | SGR-main/baseline_algs/alt_arg_parser.py | import argparse
def parse_args():
args_dict = {}
# Default Values
gens = 500
robot_size = 5
steps = 600
env = "Walker-v0" # env_names = ["CaveCrawler-v0", "UpStepper-v0", "ObstacleTraverser-v0"]
n_threads = 6
save_to = ""
goal_fit = 10
max_stag = 100
structure_pop = 12
controller_pop = 11
controller_in_between_gens = 1
pop_size = 64
pad = 0
neat_config = "../neat_configs/single_genome_neat.cfg"
neat_controller_config = "../neat_configs/neat_controller.cfg"
neat_structure_config = "../neat_configs/neat_structure.cfg"
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--gens", nargs="?", default=gens, help="", type=int)
parser.add_argument("-r", "--robot_size", nargs="?", default=robot_size, help="", type=int)
parser.add_argument("-s", "--steps", nargs="?", default=steps, help="", type=int)
parser.add_argument("-t", "--cpu", nargs="?", default=n_threads, help="", type=int)
parser.add_argument("-e", "--env", nargs="?", default=env, help="", type=str)
parser.add_argument("--save_to", nargs="?", default=save_to, help="", type=str)
parser.add_argument("--goal_fit", nargs="?", default=goal_fit, help="", type=float)
parser.add_argument("--max_stag", nargs="?", default=max_stag, help="", type=int)
parser.add_argument("--pop", nargs="?", default=pop_size, help="", type=int)
parser.add_argument("--neat_config", nargs="?", default=neat_config, help="", type=str)
parser.add_argument("--controller_config", nargs="?", default=neat_controller_config, help="", type=str)
parser.add_argument("--structure_config", nargs="?", default=neat_structure_config, help="", type=str)
parser.add_argument("--controller_in_between_gens", nargs="?", default=controller_in_between_gens, help="", type=int)
parser.add_argument("--structure_pop", nargs="?", default=structure_pop, help="", type=int)
parser.add_argument("--controller_pop", nargs="?", default=controller_pop, help="", type=int)
parser.add_argument("--pad", nargs="?", default=pad, help="", type=int)
command_line_args = parser.parse_args()
args_dict["gens"] = command_line_args.gens
args_dict["pop_size"] = command_line_args.pop
args_dict["robot_size"] = command_line_args.robot_size
args_dict["steps"] = command_line_args.steps
args_dict["env"] = command_line_args.env
args_dict["cpu"] = command_line_args.cpu
args_dict["save_to"] = command_line_args.save_to
args_dict["goal_fit"] = command_line_args.goal_fit
args_dict["max_stag"] = command_line_args.max_stag
args_dict["controller_config"] = command_line_args.controller_config
args_dict["structure_config"] = command_line_args.structure_config
args_dict["controller_in_between_gens"] = command_line_args.controller_in_between_gens
args_dict["structure_pop"] = command_line_args.structure_pop
args_dict["controller_pop"] = command_line_args.controller_pop
args_dict["pad"] = command_line_args.pad
args_dict["neat_config"] = command_line_args.neat_config
    # Print the parsed parameters for logging
for k, v in args_dict.items():
print(f"{k}: {v}")
print()
return args_dict
| 3,248 | 40.653846 | 121 | py |
SGR | SGR-main/baseline_algs/multiple_genome_neat.py | import neat
import os
import numpy as np
import errno
import dill
import neat
import math
import neat.nn
import pathlib
import sys
from typing import Dict
from pathos.multiprocessing import ProcessPool
from evogym import hashable
sys.path.append('../')
from alt_arg_parser import parse_args
from sgr.custom_reporter import CustomReporter, remove_reporters
from sgr.generate_robot import generate_robot_CPPN_like
from sgr.evogym_sim import simulate_env, get_obs_size
from sgr.generate_robot import eval_robot_constraint
BEST_FIT = -10000
STAG = 0
POPULATION = None
class RobotController:
def __init__(self, controllers) -> None:
self.population = controllers
self.best_fit = None
self.evaluated_this_gen = False
def get_controller_config(robot, params):
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, params["controller_config"])
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction, neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path)
in_size = math.ceil(math.sqrt(get_obs_size(robot, params["env"])))
out_size = params["robot_size"]**2
config.genome_config.num_inputs = in_size**2
config.genome_config.input_keys = [-1*i for i in range(1, (in_size**2)+1)]
config.genome_config.num_outputs = out_size
config.genome_config.output_keys = [i for i in range(1, out_size+1)]
config.pop_size = params["controller_pop"]
return config
def get_controller_population(robot, params, robot_dict: Dict[str, RobotController]):
robot_hash = hashable(robot)
if robot_hash not in robot_dict:
config = get_controller_config(robot, params)
p = neat.Population(config)
robot_dict[robot_hash] = RobotController(p)
return robot_dict[robot_hash]
def update_pop_fitness_thread(genomes, robot, control_neat_config, params):
results_dict = {}
for g_id, genome in genomes:
net = neat.nn.FeedForwardNetwork.create(genome, control_neat_config)
fit, _ = simulate_env(robot, net, params["env"], params["steps"])
results_dict[g_id] = fit
return results_dict
def controller_fit_func(genomes, control_neat_config, robot: np.array, params):
try:
pool = ProcessPool(nodes=params["cpu"])
fit_func = lambda x: update_pop_fitness_thread(x, robot, control_neat_config, params)
results = pool.map(
fit_func,
np.array_split(genomes, params["cpu"]),
)
fitness_dict = {}
for result_dict in results:
for k, v in result_dict.items():
fitness_dict[k] = v
for g_id, genome in genomes:
genome.fitness = fitness_dict[g_id]
except IOError as e:
if e.errno == errno.EPIPE:
print("Problem with broken pipe")
else:
raise(IOError)
def optimize_control(controller_pop, robot, params):
c_fit_func = lambda genomes, config: controller_fit_func(genomes, config, robot, params)
champion = controller_pop.run(c_fit_func, params["controller_in_between_gens"])
return champion.fitness
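# Structure fitness: each structure genome is decoded into a robot; the robot's own NEAT
# controller population (cached per body hash in robot_dict) is evolved for
# controller_in_between_gens generations, and the best controller fitness found is
# assigned back to the structure genome.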
def structure_fit_func(genomes, config, params, robot_dict: Dict[str, RobotController]):
global BEST_FIT, STAG, POPULATION
STAG += 1
for robot in robot_dict.values():
robot.evaluated_this_gen = False
for _, genome in genomes:
net = neat.nn.FeedForwardNetwork.create(genome, config)
robot = generate_robot_CPPN_like(net, params["robot_size"])
if not eval_robot_constraint(robot):
genome.fitness = -10000
continue
robot_controllers = get_controller_population(robot, params, robot_dict)
if robot_controllers.best_fit is not None and robot_controllers.evaluated_this_gen:
genome.fitness = robot_controllers.best_fit
continue
best_fit = optimize_control(robot_controllers.population, robot, params)
genome.fitness = best_fit
robot_controllers.best_fit = best_fit
robot_controllers.evaluated_this_gen = True
if genome.fitness > BEST_FIT:
BEST_FIT = genome.fitness
STAG = 0
if STAG > params["max_stag"]:
print("!!!!!!!!!!!!!!!!!!!!! POPULATION STAGNATED !!!!!!!!!!!!!!!!!!!")
if params["save_to"] is not "":
dill.dump(POPULATION, open(params["save_to"] + "_pop.pkl", mode='wb'))
dill.dump(robot_dict, open(params["save_to"] + "_robot_dict.pkl", mode='wb'))
exit()
def main():
params = parse_args()
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, params["structure_config"])
neat_config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction, neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path)
    neat_config.pop_size = params["structure_pop"]
pop = neat.Population(neat_config)
stats = neat.StatisticsReporter()
pop.add_reporter(stats)
if params["save_to"] is not "":
pathlib.Path("/".join(params["save_to"].split("/")[:-1])).mkdir(parents=True, exist_ok=True)
pop.add_reporter(CustomReporter(True, params["save_to"] + "_out.txt", params["save_to"] + "_table.csv"))
pop.add_reporter(neat.StdOutReporter(True))
global POPULATION
POPULATION = pop
robot_dict = {}
f = lambda genomes, config: structure_fit_func(genomes, config, params, robot_dict)
winner = pop.run(f, params["gens"])
print('\nBest genome:\n{!s}'.format(winner))
if params["save_to"] is not "":
remove_reporters(pop)
dill.dump(pop, open(params["save_to"] + "_structure_pop.pkl", mode='wb'))
dill.dump(robot_dict, open(params["save_to"] + "_robot_dict.pkl", mode='wb'))
if __name__ == "__main__":
main() | 5,814 | 33.613095 | 136 | py |
SGR | SGR-main/hyperneat/hyperNEAT.py | """
All Hyperneat related logic resides here.
"""
import neat
def create_phenotype_network(cppn, substrate, activation_function="tanh", output_activation="identity", output_node_idx=0):
"""
Creates a recurrent network using a cppn and a substrate.
"""
input_coordinates = substrate.input_coordinates
output_coordinates = substrate.output_coordinates
hidden_coordinates = substrate.hidden_coordinates
# Get activation function.
act_function_set = neat.activations.ActivationFunctionSet()
activation = act_function_set.get(activation_function)
out_activation = act_function_set.get(output_activation)
idx = 0
node_dict = {}
for n in input_coordinates:
node_dict[n] = idx
idx += 1
for layer in hidden_coordinates:
for n in layer:
node_dict[n] = idx
idx += 1
for n in output_coordinates:
node_dict[n] = idx
idx += 1
node_evals = []
# connect input to hidden
if len(hidden_coordinates) > 0:
for node in hidden_coordinates[0]:
im = connect_node_to_layer(cppn, node, input_coordinates, node_dict, False, 1, output_node_idx)
eval = (node_dict[node], activation, sum, 0.0, 1.0, im)
node_evals.append(eval)
# connect input to output if there are no hidden layers
else:
for node in output_coordinates:
im = connect_node_to_layer(cppn, node, input_coordinates, node_dict, False, 1, output_node_idx)
eval = (node_dict[node], out_activation, sum, 0.0, 1.0, im)
node_evals.append(eval)
# connect hidden to hidden
l = 0
while l+1 < len(hidden_coordinates):
for node in hidden_coordinates[l+1]:
im = connect_node_to_layer(cppn, node, hidden_coordinates[l], node_dict, False, 1, output_node_idx)
eval = (node_dict[node], activation, sum, 0.0, 1.0, im)
node_evals.append(eval)
l += 1
# connect hidden to output
if len(hidden_coordinates) > 0:
for node in output_coordinates:
im = connect_node_to_layer(cppn, node, hidden_coordinates[-1], node_dict, False, 1, output_node_idx)
eval = (node_dict[node], out_activation, sum, 0.0, 1.0, im)
node_evals.append(eval)
input_nodes = [node_dict[n] for n in input_coordinates]
output_nodes = [node_dict[n] for n in output_coordinates]
return neat.nn.FeedForwardNetwork(input_nodes, output_nodes, node_evals)
def connect_node_to_layer(cppn, n_coord, goal_layer, node_dict, outgoing, max_weight, output_node_idx):
im = []
for node in goal_layer:
w = query_cppn(n_coord, node, outgoing, cppn, max_weight, output_node_idx)
if w != 0.0: # Only include connection if the weight isn't 0.0.
im.append((node_dict[node], w))
return im
def query_cppn(coord1, coord2, outgoing, cppn, max_weight, output_node_idx):
"""
Get the weight from one point to another using the CPPN.
Takes into consideration which point is source/target.
"""
if outgoing:
i = [*coord1, *coord2, 1.0]
else:
i = [*coord2, *coord1, 1.0]
w = cppn.activate(i)[output_node_idx]
if abs(w) > 0.2: # If abs(weight) is below threshold, treat weight as 0.0.
if w > 0:
w = (w - 0.2) / 0.8
else:
w = (w + 0.2) / 0.8
return w * max_weight
else:
return 0.0 | 3,452 | 34.96875 | 123 | py |
SGR | SGR-main/hyperneat/test_cppn.py | """
Visualizes a CPPN - remember to edit path in visualize.py, sorry.
"""
import pickle
from pureples.es_hyperneat.es_hyperneat import find_pattern
from pureples.shared.visualize import draw_pattern
path_to_cppn = "es_hyperneat_xor_small_cppn.pkl"
# For now, path_to_cppn should match path in visualize.py, sorry.
with open(path_to_cppn, 'rb') as cppn_input:
CPPN = pickle.load(cppn_input)
pattern = find_pattern(CPPN, (0.0, -1.0))
draw_pattern(pattern)
| 469 | 28.375 | 65 | py |
SGR | SGR-main/hyperneat/__init__.py | 0 | 0 | 0 | py |
|
SGR | SGR-main/hyperneat/substrate.py | import itertools as it
import numpy as np
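# calc_layer(*coords) builds the node coordinates of one substrate layer: every value
# except the last is a node count along an axis (spread evenly over [-1.0, 1.0], or fixed
# at 0.0 when the count is 1), and the last value is a constant coordinate tagging the layer.
# Example: calc_layer(2, 1, -1) -> ((-1.0, 0.0, -1), (1.0, 0.0, -1))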
def calc_layer(*coords):
coord_arr = []
for i in coords[:-1]:
aux = np.linspace(-1.0, 1.0, i) if (i > 1) else [0.0]
coord_arr.append(aux)
last_coord = [coords[-1]]
return tuple(it.product(*coord_arr, last_coord))
"""
The substrate.
"""
class Substrate(object):
"""
Represents a substrate: Input coordinates, output coordinates, hidden coordinates and a resolution defaulting to 10.0.
"""
def __init__(self, shape, res=10.0):
self.res = res
self.dimensions = len(shape[0])
layers = [calc_layer(*l) for l in shape]
self.input_coordinates = layers[0]
self.output_coordinates = layers[-1]
self.hidden_coordinates = [[*l] for l in layers[1:-1]]
| 785 | 25.2 | 122 | py |
SGR | SGR-main/hyperneat/visualize.py | """
Varying visualisation tools.
"""
import pickle
import graphviz
import matplotlib.pyplot as plt
def draw_net(net, filename=None, node_names={}, node_colors={}):
"""
Draw neural network with arbitrary topology.
"""
node_attrs = {
'shape': 'circle',
'fontsize': '9',
'height': '0.2',
'width': '0.2'}
dot = graphviz.Digraph('svg', node_attr=node_attrs)
inputs = set()
for k in net.input_nodes:
inputs.add(k)
name = node_names.get(k, str(k))
input_attrs = {'style': 'filled',
'shape': 'box',
'fillcolor': node_colors.get(k, 'lightgray')}
dot.node(name, _attributes=input_attrs)
outputs = set()
for k in net.output_nodes:
outputs.add(k)
name = node_names.get(k, str(k))
node_attrs = {'style': 'filled',
'fillcolor': node_colors.get(k, 'lightblue')}
dot.node(name, _attributes=node_attrs)
for node, _, _, _, _, links in net.node_evals:
for i, w in links:
node_input, output = node, i
a = node_names.get(output, str(output))
b = node_names.get(node_input, str(node_input))
style = 'solid'
color = 'green' if w > 0.0 else 'red'
width = str(0.1 + abs(w / 5.0))
dot.edge(a, b, _attributes={
'style': style, 'color': color, 'penwidth': width})
dot.render(filename)
return dot
def onclick(event):
"""
Click handler for weight gradient created by a CPPN. Will re-query with the clicked coordinate.
"""
plt.close()
x = event.xdata
y = event.ydata
path_to_cppn = "es_hyperneat_xor_small_cppn.pkl"
# For now, path_to_cppn should match path in test_cppn.py, sorry.
with open(path_to_cppn, 'rb') as cppn_input:
cppn = pickle.load(cppn_input)
from pureples.es_hyperneat.es_hyperneat import find_pattern
pattern = find_pattern(cppn, (x, y))
draw_pattern(pattern)
def draw_pattern(im, res=60):
"""
Draws the pattern/weight gradient queried by a CPPN.
"""
fig = plt.figure()
plt.axis([-1, 1, -1, 1])
fig.add_subplot(111)
a = range(res)
b = range(res)
for x in a:
for y in b:
px = -1.0 + (x/float(res))*2.0+1.0/float(res)
py = -1.0 + (y/float(res))*2.0+1.0/float(res)
c = str(0.5-im[x][y]/float(res))
plt.plot(px, py, marker='s', color=c)
fig.canvas.mpl_connect('button_press_event', onclick)
plt.grid()
plt.show()
def draw_es(id_to_coords, connections, filename):
"""
Draw the net created by ES-HyperNEAT
"""
fig = plt.figure()
plt.axis([-1.1, 1.1, -1.1, 1.1])
fig.add_subplot(111)
for c in connections:
color = 'red'
if c.weight > 0.0:
color = 'black'
plt.arrow(c.x1, c.y1, c.x2-c.x1, c.y2-c.y1, head_width=0.00, head_length=0.0,
fc=color, ec=color, length_includes_head=True)
for (coord, _) in id_to_coords.items():
plt.plot(coord[0], coord[1], marker='o', markersize=8.0, color='grey')
plt.grid()
fig.savefig(filename)
| 3,225 | 26.810345 | 99 | py |
SGR | SGR-main/hyperneat/create_cppn.py | """
CPPN creator.
"""
import neat
from neat.graphs import feed_forward_layers
def create_cppn(genome, config, output_activation_function="tanh"):
"""
Receives a genome and returns its phenotype (a FeedForwardNetwork).
"""
# Gather expressed connections.
connections = [cg.key for cg in genome.connections.values() if cg.enabled]
layers = feed_forward_layers(
config.genome_config.input_keys, config.genome_config.output_keys, connections)
node_evals = []
for layer in layers:
for node in layer:
inputs = []
node_expr = [] # currently unused
for conn_key in connections:
inode, onode = conn_key
if onode == node:
cg = genome.connections[conn_key]
inputs.append((inode, cg.weight))
node_expr.append("v[{}] * {:.7e}".format(inode, cg.weight))
ng = genome.nodes[node]
aggregation_function = config.genome_config.aggregation_function_defs.get(
ng.aggregation)
            # Force the output node's activation function to the requested output_activation_function.
if node in config.genome_config.output_keys:
ng.activation = output_activation_function
activation_function = config.genome_config.activation_defs.get(
ng.activation)
node_evals.append(
(node, activation_function, aggregation_function, ng.bias, ng.response, inputs))
return neat.nn.FeedForwardNetwork(config.genome_config.input_keys,
config.genome_config.output_keys, node_evals)
| 1,659 | 35.888889 | 96 | py |
SGR | SGR-main/dynamic_env/generateJSON.py | import json
import numpy as np
N_TYPES = ['empty', 'rigid', 'soft', 'hori', 'vert']
EMPTY_VX = 0
RIGID_VX = 1
SOFT_VX = 2
HORI_VX = 3
VERT_VX = 4
FIXED_VX = 5
STARTING_ZONE = 12
def base_json(width, height):
env_json = {
"grid_width": width,
"grid_height": height,
"objects": {}
}
return env_json
def generate_env(width, height, h_list):
env = np.zeros((height, width))
for j in range(width):
h = h_list[j]
for i in range(height):
env[i][j] = EMPTY_VX
if i < h:
env[i][j] = FIXED_VX
return env
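# Map (row, col) grid coordinates to the flat voxel index used in the JSON representation
# (row-major order), e.g. ij_to_index(1, 2, width=60) == 62.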
def ij_to_index(i, j, width):
return j + i*width
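# Collect the flat indices of the 4-connected neighbours of voxel (i, j) that are not
# empty, so adjacent ground voxels are linked in the environment description.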
def add_neighbors(i, j, env_vals, width, height):
neighbors = []
if j > 0 and env_vals[i][j-1] != EMPTY_VX:
neighbors.append(ij_to_index(i, j-1, width))
if j < width-1 and env_vals[i][j+1] != EMPTY_VX:
neighbors.append(ij_to_index(i, j+1, width))
if i > 0 and env_vals[i-1][j] != EMPTY_VX:
neighbors.append(ij_to_index(i-1, j, width))
if i < height-1 and env_vals[i+1][j] != EMPTY_VX:
neighbors.append(ij_to_index(i+1, j, width))
return neighbors
def generate_env_json(width=60, height=10, h_list=None):
if h_list is None:
h_list = [height//2 for _ in range(width)]
ground = {
"indices": [],
"types": [],
"neighbors": {}
}
env_vals = generate_env(width, height, h_list)
for i in range(height):
for j in range(width):
idx = ij_to_index(i, j, width)
vx_type = env_vals[i][j]
if vx_type != EMPTY_VX:
ground["indices"].append(idx)
ground["types"].append(vx_type)
ground["neighbors"][str(idx)] = add_neighbors(i, j, env_vals, width, height)
env_json = base_json(width, height)
env_json["objects"]["ground"] = ground
return env_json
def create_ObstacleTraverser_JSON(file_path):
env = generate_env_json()
with open(file_path, 'w', encoding='utf-8') as f:
json.dump(env, f, ensure_ascii=False, indent=4)
if __name__ == "__main__":
create_ObstacleTraverser_JSON('dynamic_env_v2/env.json') | 2,187 | 26.35 | 92 | py |
SGR | SGR-main/dynamic_env/traverser.py | from gym import error, spaces
from evogym import *
from evogym.envs import WalkingBumpy2, StairsBase
import numpy as np
import os
from dynamic_env.generateJSON import generate_env_json
from dynamic_env.env_config import EnvConfig
class DynamicObstacleTraverser(WalkingBumpy2):
def __init__(self, body, connections=None, filename ="", env_config: EnvConfig=None):
# make world
if env_config is not None:
world_dict = env_config.generate_env_dict()
self.load_env_from_json_dict(world_dict)
elif filename != "":
self.load_world_from_file(filename)
else:
world_dict = generate_env_json()
self.load_env_from_json_dict(world_dict)
starting_height = (self.world.grid_size[1]//2)+5
self.world.add_from_array('robot', body, 2, starting_height, connections=connections)
# init sim
StairsBase.__init__(self, self.world)
# set action space and observation space
num_actuators = self.get_actuator_indices('robot').size
num_robot_points = self.object_pos_at_time(self.get_time(), "robot").size
self.sight_dist = 4
self.step_count = 0
        self.action_space = spaces.Box(low=0.6, high=1.6, shape=(num_actuators,), dtype=float)
        self.observation_space = spaces.Box(low=-100.0, high=100.0, shape=(3 + num_robot_points + (2*self.sight_dist + 1),), dtype=float)
def load_world_from_file(self, filename):
local_dir = os.path.dirname(__file__)
self.world = EvoWorld.from_json(os.path.join(local_dir, filename))
def load_env_from_json_dict(self, world_dict):
self.world = EvoWorld()
file_grid_size = Pair(world_dict['grid_width'], world_dict['grid_height'])
for name, obj_data in world_dict['objects'].items():
obj = WorldObject()
obj.load_from_parsed_json(name, obj_data, file_grid_size)
self.world.add_object(obj)
def get_obs(self):
obs = np.array ([
*self.get_vel_com_obs("robot"),
*self.get_ort_obs("robot"),
*self.get_pos_com_obs("robot"),
*self.get_floor_obs("robot", ["ground"], self.sight_dist),
self.step_count%30,
])
return obs
def step(self, action):
_, reward, done, _ = super().step(action)
obs = self.get_obs()
return obs, reward, done, {}
def reset(self):
_ = super().reset()
return self.get_obs() | 2,521 | 34.027778 | 139 | py |
SGR | SGR-main/dynamic_env/__init__.py | 0 | 0 | 0 | py |
|
SGR | SGR-main/dynamic_env/env_config.py | from copy import deepcopy
import os
import numpy as np
import json
import itertools
from .generateJSON import generate_env_json
class EnvConfig:
idCounter = itertools.count().__next__
def __init__(self, seed, width = 150, height = 18, flat_start = 9):
self.id = self.idCounter()
self.seed = seed
self.rng = np.random.default_rng(seed)
self.h = height
self.w = width
self.flat_start = flat_start
self.heights_list = np.full((width), height//2)
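    # Randomly perturb the terrain height column by column, keeping the first flat_start
    # columns untouched and clipping each new height so a column is never more than one
    # voxel higher or two voxels lower than the previous one.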
def mutate_barrier_h(self, mutation_prob):
previous_h = self.h//2
for idx, h in enumerate(self.heights_list):
if idx < self.flat_start:
pass
elif self.rng.random() < mutation_prob:
r = self.rng.random()
if r < .05:
h -= 3
                elif r < .15:
h -= 2
elif r < .5:
h -= 1
elif r < .85:
h += 1
elif r < .95:
h += 2
else:
h += 3
h = np.clip(h, max(0, previous_h-2), min(self.h, previous_h + 1))
self.heights_list[idx] = h
previous_h = h
def generate_json(self, filename="env.json"):
env = generate_env_json(self.w, self.h, self.heights_list)
local_dir = os.path.dirname(__file__)
path = os.path.join(local_dir, filename)
with open(path, 'w', encoding='utf-8') as f:
json.dump(env, f, ensure_ascii=False, indent=4)
def generate_env_dict(self):
return generate_env_json(self.w, self.h, self.heights_list)
def create_child(self, seed = None):
child = deepcopy(self)
child.id = self.idCounter()
        child.seed = self.rng.integers(100) if seed is None else seed
        child.rng = np.random.default_rng(child.seed)
return child
if __name__ == "__main__":
env1 = EnvConfig(1)
env1.generate_json("env1.json")
env = env1
for i in range(10):
new_env = env.create_child()
new_env.mutate_barrier_h(.25)
env = new_env
print(env.heights_list)
for idx, h in enumerate(env.heights_list):
if idx == 0:
            continue
if h-env.heights_list[idx-1] < -2 or h-env.heights_list[idx-1] > 2:
print(idx, h, env.heights_list[idx-1])
print()
| 2,464 | 28.698795 | 79 | py |
SGR | SGR-main/sgr/custom_reporter.py | from __future__ import division, print_function
import time
from neat.math_util import mean, stdev
from neat.six_util import itervalues, iterkeys
from neat.reporting import ReporterSet
import neat
class CustomReporter():
"""Uses `print` to output information about the run; an example reporter class."""
def __init__(self, show_species_detail, txt_file, csv_file):
self.show_species_detail = show_species_detail
self.generation = None
self.generation_start_time = None
self.generation_times = []
self.num_extinctions = 0
self.txt = open(txt_file, "w+")
self.csv = open(csv_file, "w+")
self.csv.write("pop_size;best_fit;num_species\n")
def start_generation(self, generation):
self.generation = generation
text = ""
text += '\n ****** Running generation {0} ****** \n'.format(generation) + "\n"
self.txt.write(text)
self.generation_start_time = time.time()
def end_generation(self, config, population, species_set):
ng = len(population)
ns = len(species_set.species)
text = ""
if self.show_species_detail:
text += 'Population of {0:d} members in {1:d} species:'.format(ng, ns) + "\n"
sids = list(iterkeys(species_set.species))
sids.sort()
text += " ID age size fitness adj fit stag" + "\n"
text += " ==== === ==== ======= ======= ====" + "\n"
for sid in sids:
s = species_set.species[sid]
a = self.generation - s.created
n = len(s.members)
f = "--" if s.fitness is None else "{:.1f}".format(s.fitness)
af = "--" if s.adjusted_fitness is None else "{:.3f}".format(s.adjusted_fitness)
st = self.generation - s.last_improved
text += " {: >4} {: >3} {: >4} {: >7} {: >7} {: >4}".format(sid, a, n, f, af, st) + "\n"
else:
text += 'Population of {0:d} members in {1:d} species'.format(ng, ns) + "\n"
elapsed = time.time() - self.generation_start_time
self.generation_times.append(elapsed)
self.generation_times = self.generation_times[-10:]
average = sum(self.generation_times) / len(self.generation_times)
text += 'Total extinctions: {0:d}'.format(self.num_extinctions) + "\n"
if len(self.generation_times) > 1:
text += "Generation time: {0:.3f} sec ({1:.3f} average)".format(elapsed, average) + "\n"
else:
text += "Generation time: {0:.3f} sec".format(elapsed) + "\n"
self.txt.write(text)
def post_evaluate(self, config, population, species, best_genome):
# pylint: disable=no-self-use
fitnesses = [c.fitness for c in itervalues(population)]
fit_mean = mean(fitnesses)
fit_std = stdev(fitnesses)
best_species_id = species.get_species_id(best_genome.key)
text = ""
text += 'Population\'s average fitness: {0:3.5f} stdev: {1:3.5f}'.format(fit_mean, fit_std) + "\n"
text += 'Best fitness: {0:3.5f} - size: {1!r} - species {2} - id {3}'.format(best_genome.fitness, best_genome.size(), best_species_id, best_genome.key) + "\n"
self.txt.write(text)
self.csv.write(f"{len(population)};{best_genome.fitness};{len(species.species)}\n")
def complete_extinction(self):
self.num_extinctions += 1
text = 'All species extinct.' + "\n"
self.txt.write(text)
def found_solution(self, config, generation, best):
text = '\nBest individual in generation {0} meets fitness threshold - complexity: {1!r}'.format(self.generation, best.size()) + "\n"
self.txt.write(text)
def species_stagnant(self, sid, species):
text = ""
if self.show_species_detail:
text += "\nSpecies {0} with {1} members is stagnated: removing it".format(sid, len(species.members)) + "\n"
self.txt.write(text)
def info(self, msg):
self.txt.write(msg+"\n")
def remove_reporters(pop: neat.Population):
pop.reporters = ReporterSet()
| 4,142 | 42.610526 | 166 | py |
SGR | SGR-main/sgr/generate_robot.py | import numpy as np
from .substrates import raise_substrate_error
from evogym import is_connected, has_actuator
N_TYPES = ['empty', 'rigid', 'soft', 'hori', 'vert']
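# The "3d" substrate queries the network once with a single constant input and reshapes
# the flat output into a (robot_size, robot_size, len(N_TYPES)) grid; each voxel becomes
# the type with the highest activation.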
def generate_robot_3D_out(net, robot_size):
graph_out = net.activate([1997])
formated_output = np.reshape(graph_out, (robot_size, robot_size, len(N_TYPES)), "F")
robot = np.argmax(formated_output, 2)
return robot
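# The "cppn" substrate instead queries the network once per voxel with the voxel's
# (row, col) offset from the robot centre and takes the argmax over voxel types.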
def generate_robot_CPPN_like(net, robot_size=5):
robot = np.ones((robot_size, robot_size))
for i in range(robot_size):
for j in range(robot_size):
input = [i - (robot_size // 2), j - (robot_size // 2)]
# input = np.concatenate((input, [BIAS]))
graph_out = net.activate(input)
node = np.argmax(graph_out)
robot[i][j] = node
return robot
def generate_robot(net, robot_size, substrate_name):
if substrate_name == "cppn" or substrate_name == "CPPN":
robot = generate_robot_CPPN_like(net, robot_size)
elif substrate_name == "3D" or substrate_name == "3d":
robot = generate_robot_3D_out(net, robot_size)
else:
raise_substrate_error()
return robot
# return premade_robot()
def premade_robot():
a = [
[3, 3, 3, 3, 3],
[3, 3, 3, 3, 3],
[3, 3, 0, 3, 3],
[3, 3, 0, 3, 3],
[3, 3, 0, 3, 3],
]
r = np.asarray(a)
return r
def eval_robot_constraint(robot):
validity = is_connected(robot) and has_actuator(robot)
return validity | 1,509 | 29.2 | 88 | py |
SGR | SGR-main/sgr/sgr.py | from copy import deepcopy
from multiprocessing import TimeoutError
import multiprocess
import neat
import os
import numpy as np
import errno
import dill
import neat
import time
import neat.nn
import pathlib
import itertools
from neat.reporting import ReporterSet
from pathos.multiprocessing import ProcessPool
from hyperneat.hyperNEAT import create_phenotype_network
from sgr.custom_reporter import CustomReporter, remove_reporters
from sgr.body_speciation import CustomGenome
from sgr.substrates import morph_substrate, control_substrate
from sgr.generate_robot import generate_robot, eval_robot_constraint
from sgr.evogym_sim import simulate_env
from dynamic_env.generateJSON import create_ObstacleTraverser_JSON
class SGR:
idCounter = itertools.count().__next__
def __init__(
self,
neat_config_path,
robot_size,
spec_genotype_weight,
spec_phenotype_weight,
pop_size,
substrate_type,
save_to="",
reporters=True
):
self.id = self.idCounter()
morphology_coords = morph_substrate(robot_size, substrate_type)
self.input_size = morphology_coords.dimensions*2 + 1 # two coordinates plus the bias
self.pop_size = pop_size
self.robot_size = robot_size
self.substrate_type = substrate_type
self.save_to = save_to
CustomGenome.robot_func = lambda self, net, params: generate_robot(net, robot_size, substrate_type)
CustomGenome.substrate = morphology_coords
CustomGenome.robot_size = robot_size
CustomGenome.spec_genotype_weight = spec_genotype_weight
CustomGenome.spec_phenotype_weight = spec_phenotype_weight
self.neat_config = self.create_neat_config(neat_config_path, CustomGenome)
self.pop = neat.Population(self.neat_config)
if reporters:
self.add_reporters()
self.voxel_types = ['empty', 'rigid', 'soft', 'hori', 'vert']
self.best_fit = -10000
self.stagnation = 0
self.generation = 0
self.max_stagnation = None
self.save_gen_interval = None
def create_neat_config(self, config_path, neat_genome=neat.DefaultGenome):
neat_config = neat.Config(neat_genome, neat.DefaultReproduction, neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path)
# ovewriting pop_size from the neat config file
neat_config.pop_size = self.pop_size
# overwriting the num_inputs and num_outputs from the neat config file to fit the substrate
neat_config.genome_config.num_inputs = self.input_size
neat_config.genome_config.input_keys = [-1*i for i in range(1, self.input_size+1)]
neat_config.genome_config.num_outputs = 2
neat_config.genome_config.output_keys = [1, 2]
return neat_config
def add_reporters(self):
self.pop.add_reporter(neat.StdOutReporter(True))
self.pop.add_reporter(neat.StatisticsReporter())
if self.save_to is not "":
pathlib.Path("/".join(self.save_to.split("/")[:-1])).mkdir(parents=True, exist_ok=True)
self.pop.add_reporter(CustomReporter(True, self.save_to + "_out.txt", self.save_to + "_table.csv"))
def create_child(self):
new_pop = deepcopy(self)
new_pop.id = self.idCounter()
new_pop.stagnation = 0
new_pop.generation = 0
new_pop.best_fit = -10000
new_pop.best_genome = None
new_pop.max_stagnation = None
new_pop.save_gen_interval = None
# new_pop.pop = neat.Population(self.neat_config)
new_pop.pop.reporters = ReporterSet()
new_pop.pop.generation = 0
new_pop.pop.best_genome = None
new_pop.pop.population = self.pop.population
for _, ag in new_pop.pop.population.items():
ag.fitness = None
config = new_pop.pop.config
new_pop.pop.species = self.neat_config.species_set_type(config.species_set_config, new_pop.pop.reporters)
new_pop.pop.species.speciate(config, new_pop.pop.population, 0)
stagnation = config.stagnation_type(config.stagnation_config,new_pop.pop.reporters)
new_pop.pop.reproduction = config.reproduction_type(config.reproduction_config, new_pop.pop.reporters, stagnation)
return new_pop
def single_genome_fit(
self,
genome,
n_steps,
env_name,
dynamic_env_config=None,
render=False,
save_gif=None,
):
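        # A single CPPN genome encodes both body and brain: output node 0 of the CPPN is
        # used to build the morphology network that designs the robot, and output node 1
        # to build the controller network that drives it (see the output_node_idx argument
        # of create_phenotype_network).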
cppn = neat.nn.FeedForwardNetwork.create(genome, self.neat_config)
if hasattr(genome, 'robot'):
robot = genome.robot
else:
design_substrate = morph_substrate(self.robot_size, self.substrate_type)
design_net = create_phenotype_network(cppn, design_substrate, output_node_idx=0)
robot = generate_robot(design_net, self.robot_size, self.substrate_type)
genome.robot = robot
if not eval_robot_constraint(robot):
return -10000, False
try:
controller_substrate = control_substrate(self.robot_size, env_name, robot, self.substrate_type)
except IndexError: # Sometimes the environment just implodes
return -10000, False
controller_net = create_phenotype_network(cppn, controller_substrate, output_node_idx=1)
reward, done = simulate_env(robot, controller_net, env_name, n_steps, dynamic_env_config, render, save_gif)
return reward, done
def fit_func_thread(self, genomes, n_steps, env_name, dynamic_env_config=None):
results_dict = {}
for genome_key, genome in genomes:
reward, _ = self.single_genome_fit(genome, n_steps, env_name, dynamic_env_config)
results_dict[genome_key] = reward
return results_dict
def fit_func(
self,
genomes,
neat_config,
env_name,
n_steps,
cpus,
dynamic_env_config = None,
):
self.stagnation += 1
try:
pool = ProcessPool(nodes=cpus)
results_map = pool.amap(
self.fit_func_thread,
np.array_split(genomes, cpus),
[n_steps for _ in range(cpus)],
[env_name for _ in range(cpus)],
[dynamic_env_config for _ in range(cpus)],
)
results = results_map.get(timeout=60*10)
fitness_dict = {}
for result_dict in results:
for k, v in result_dict.items():
fitness_dict[k] = v
for g_id, genome in genomes:
genome.fitness = fitness_dict[g_id]
if genome.fitness > self.best_fit:
self.best_fit = genome.fitness
self.stagnation = 0
self.best_genome = genome
except IOError as e: # Sometimes the environment just implodes
if e.errno == errno.EPIPE:
print("Problem with broken pipe")
else:
raise(IOError)
except multiprocess.context.TimeoutError as e:
print("Deu timeout!!!!!!")
for g_id, genome in genomes:
if genome.fitness is None:
genome.fitness = -1000
pool.terminate()
pool.clear()
surviving_genomes = {g_id: genome for g_id, genome in genomes if genome.fitness is not None and genome.fitness > -1000}
self.pop.population = surviving_genomes
self.check_stagnation_and_save_interval()
def check_stagnation_and_save_interval(self):
# print("STAGNATION: ", self.stagnation)
if self.max_stagnation is not None and self.stagnation > self.max_stagnation:
print("!!!!!!!!!!!!!!!!!!!!! POPULATION STAGNATED !!!!!!!!!!!!!!!!!!!")
if self.save_to is not "":
dill.dump(self.pop, open(self.save_to + "_pop.pkl", mode='wb'))
exit()
if self.save_to is not "" and self.save_gen_interval is not None and (self.pop.generation+1)% self.save_gen_interval == 0:
dill.dump(self.pop, open(f"{self.save_to}_pop_gen_{self.pop.generation}.pkl", mode='wb'))
def run(
self,
env_name,
n_steps,
n_gens,
cpus=1,
max_stagnation=None,
save_gen_interval=None,
print_results=True,
dynamic_env_config=None
):
self.max_stagnation = max_stagnation
self.save_gen_interval = save_gen_interval
neat_fit_func = lambda genomes, config: self.fit_func(genomes, config, env_name, n_steps, cpus, dynamic_env_config)
winner: CustomGenome = self.pop.run(neat_fit_func, n_gens)
if print_results:
print('\nBest genome:\n{!s}'.format(winner))
if self.save_to is not "":
# remove_reporters(self.pop)
dill.dump(self.pop, open(self.save_to + "_pop.pkl", mode='wb'))
return winner | 9,182 | 35.879518 | 133 | py |
SGR | SGR-main/sgr/__init__.py | 0 | 0 | 0 | py |
|
SGR | SGR-main/sgr/body_speciation.py | import neat
import numpy as np
from hyperneat.hyperNEAT import create_phenotype_network
from evogym import is_connected, has_actuator
def robot_from_genome(genome, robot_size, substrate, robot_func, config):
cppn = neat.nn.FeedForwardNetwork.create(genome, TempConfig(config))
design_net = create_phenotype_network(cppn, substrate, output_node_idx=0)
robot = robot_func(design_net, robot_size)
if not (is_connected(robot) and has_actuator(robot)):
robot = np.zeros((robot_size, robot_size))
return robot
class TempConfig:
def __init__(self, config):
self.genome_config = config
class CustomGenome(neat.DefaultGenome):
robot_size = None
substrate = None
robot_func = None
spec_genotype_weight = None
spec_phenotype_weight = None
def __init__(self, key):
super().__init__(key)
if self.robot_size is None or self.substrate is None or self.robot_func is None or self.spec_genotype_weight is None or self.spec_phenotype_weight is None:
print("Please define superparameters of CustomGenome")
raise
def distance(self, other, config):
genotype_dist = super().distance(other, config)
if not hasattr(self, 'robot'):
self.robot = robot_from_genome(self, self.robot_size, self.substrate, self.robot_func, config)
if not hasattr(other, 'robot'):
other.robot = robot_from_genome(other, self.robot_size, self.substrate, self.robot_func, config)
diff = 0
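        # Phenotype distance: a full point when exactly one of the two robots has a voxel
        # at a cell, 0.75 when both have voxels of different types; the sum is normalised
        # by the number of cells and combined with the usual NEAT genotype distance using
        # the two speciation weights.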
for i in range(self.robot_size):
for j in range(self.robot_size):
if (self.robot[i][j] == 0 and other.robot[i][j] != 0) or (self.robot[i][j] != 0 and other.robot[i][j] == 0):
diff += 1
elif self.robot[i][j] != other.robot[i][j]:
diff += .75
phenotype_dist = diff/(self.robot_size**2) # Normalizing between 0 and 1
return self.spec_genotype_weight*genotype_dist + self.spec_phenotype_weight*phenotype_dist
| 2,058 | 37.12963 | 163 | py |
SGR | SGR-main/sgr/evogym_sim.py | import math
from evogym import get_full_connectivity
import evogym.envs
import imageio
import numpy as np
import os
from dynamic_env.traverser import DynamicObstacleTraverser
from dynamic_env.env_config import EnvConfig
def get_env(robot, connections, env_name, dynamic_env_config:EnvConfig =None):
if env_name == "dynamic":
if dynamic_env_config is None:
print("Using JSON file")
local_dir = os.path.dirname(__file__)
json_path = os.path.join(local_dir, "../dynamic_env_v2/env.json")
env = DynamicObstacleTraverser(body=robot, connections=connections, filename=json_path)
else:
env = DynamicObstacleTraverser(body=robot, connections=connections, env_config=dynamic_env_config)
else:
env = evogym.envs.gym.make(env_name, body=robot, connections=connections)
return env
def get_obs_size(robot, env_name):
dynamic_env_config=None
if env_name == "dynamic":
dynamic_env_config=EnvConfig(10)
connections = get_full_connectivity(robot)
env = get_env(robot, connections, env_name, dynamic_env_config)
obs = env.reset()
env.close()
del env
return len(obs)
def simulate_env(robot, net, env_name, n_steps, dynamic_env_config:EnvConfig=None, render = False, save_gif=None):
connections = get_full_connectivity(robot)
env = get_env(robot, connections, env_name, dynamic_env_config)
reward = 0
obs = env.reset()
actuators = env.get_actuator_indices("robot")
in_size = math.ceil(math.sqrt(len(obs))) # this is to be used to format the input
finished = False
imgs = []
for _ in range(n_steps):
if render:
env.render('human')
elif save_gif is not None:
imgs.append(env.render(mode='img'))
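        # Zero-pad the observation up to the next square size so it matches the
        # fixed-size input layer of the controller substrate.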
obs.resize(in_size**2, refcheck=False)
action_by_actuator = net.activate(obs)
action = np.array([action_by_actuator[i] for i in actuators])
obs, r, done, _ = env.step(action)
reward += r
if done:
finished = True
break
env.close()
del env
if save_gif is not None:
imageio.mimsave(save_gif + ".gif", imgs, duration=(1/60))
return reward, finished | 2,276 | 32 | 114 | py |
SGR | SGR-main/sgr/substrates.py | import itertools as it
import math
import numpy as np
from sgr.evogym_sim import get_obs_size
from hyperneat.substrate import Substrate
def raise_substrate_error():
print("Substrate type should be specified")
print("Available substrates: [cppn, 3d]")
raise
def morph_substrate(robot_size, substrate_name):
if substrate_name == "cppn" or substrate_name == "CPPN":
shape = morph_substrate_CPPN_like_shape(robot_size)
elif substrate_name == "3D" or substrate_name == "3d":
shape = morph_substrate_3D_out_shape(robot_size)
else:
raise_substrate_error()
return Substrate(shape)
def control_substrate(robot_size, env_name, robot, substrate_name):
in_size = math.ceil(math.sqrt(get_obs_size(robot, env_name)))
if substrate_name == "cppn" or substrate_name == "CPPN":
shape = control_substrate_CPPN_like_shape(robot_size, in_size)
elif substrate_name == "3D" or substrate_name == "3d":
shape = control_substrate_3D_out_shape(robot_size, in_size)
else:
raise_substrate_error()
return Substrate(shape)
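# Each entry in a shape list describes one substrate layer: all values except the last
# give how many nodes to spread along each axis in [-1, 1], and the last value is a fixed
# coordinate that tags the layer (see hyperneat.substrate.calc_layer).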
def morph_substrate_3D_out_shape(robot_size):
intermediate_layer = (1+robot_size)//2
shape = [
[1,1,1,1],
[intermediate_layer, intermediate_layer, 3, 2],
[robot_size, robot_size, 5, 3],
]
return shape
def control_substrate_3D_out_shape(robot_size, in_size):
intermediate_layer = (in_size+robot_size)//2
shape = [
[in_size, in_size, 1, -1],
[intermediate_layer, intermediate_layer, 1, -2],
[robot_size, robot_size, 1, -3]
]
return shape
def morph_substrate_CPPN_like_shape(robot_size):
shape = [
[1, 2, 1],
[1, 3, 2],
[1, 4, 3],
[1, 5, 4]
]
return shape
def control_substrate_CPPN_like_shape(robot_size, in_size):
intermediate_layer = (in_size+robot_size)//2
# [intermediate_layer, intermediate_layer, -2],
shape = [
[in_size, in_size, -1],
[intermediate_layer, intermediate_layer, -2],
[robot_size, robot_size, -3]
]
return shape
| 2,110 | 28.319444 | 70 | py |
SGR | SGR-main/configs/__init__.py | 0 | 0 | 0 | py |
|
SGR | SGR-main/evaluators/poet_evaluator.py | import neat
import os
import evogym.envs
from evogym import is_connected, has_actuator, get_full_connectivity, hashable
import numpy as np
import pickle as pkl
import sys
sys.path.append('../')
from typing import List
from sgr.substrates import morph_substrate
from sgr.generate_robot import generate_robot
from sgr.sgr import SGR
from sgr.body_speciation import CustomGenome
from poet.poet import POET
from dynamic_env.env_config import EnvConfig
from pathos.multiprocessing import ProcessPool
import numpy as np
POET_DIRS = [
"new_cppn_1",
"new_3d_1",
"new_cppn_2",
"new_3d_2",
"new_cppn_3",
"new_3d_3",
]
MULT_ENV_FILES = [
"multiple_env_cppn_1",
"multiple_env_3d_1",
"multiple_env_cppn_2",
"multiple_env_3d_2",
"multiple_env_cppn_3",
"multiple_env_3d_3",
]
RESULTS_DIR = os.getcwd() + "/poet_results"
STEPS = 600
def fit_func_thread(pop, n_steps, env_name, dynamic_env_config=None):
results_dict = {}
reward, _ = pop.single_genome_fit(pop.pop.best_genome, n_steps, env_name, dynamic_env_config)
results_dict[dynamic_env_config.id] = np.round(reward, 4)
return results_dict
def multithread_eval(pop: SGR, envs: List[EnvConfig]):
cpus = len(envs)
pool = ProcessPool(nodes=5)
# winner = pop.pop.best_genome
results_map = pool.amap(
fit_func_thread,
[pop for _ in range(cpus)],
[STEPS for _ in range(cpus)],
["dynamic" for _ in range(cpus)],
envs,
)
results = results_map.get(timeout=60*10)
fit_dict = {}
for result_dict in results:
for k, v in result_dict.items():
fit_dict[k] = v
return fit_dict
class POET_TEST:
def __init__(self, test_name, envs) -> None:
self.test_name = test_name
self.csvs_dict = {}
self.envs: List[EnvConfig] = envs
self.dir_path = f"{os.getcwd()}/../checkpoints/{test_name}"
self.create_csv("global")
def evaluate_gen(self, gen):
file_path = f"{self.dir_path}/cp_{gen}.pkl"
poet_pop: POET = pkl.load(open(file_path, "rb"))
CustomGenome.robot_func = lambda self, net, config: generate_robot(net, poet_pop.run_params.robot_size)
CustomGenome.substrate = morph_substrate(poet_pop.run_params.robot_size, poet_pop.run_params.substrate_type)
CustomGenome.robot_size = poet_pop.run_params.robot_size
CustomGenome.spec_genotype_weight = poet_pop.run_params.spec_genotype_weight
CustomGenome.spec_phenotype_weight = poet_pop.run_params.spec_phenotype_weight
for p in poet_pop.pairs:
pop = p.agent_pop
            results = multithread_eval(pop, self.envs)
print_results = f"{gen}; {p.environment.id}; {pop.best_genome.key}"
for i in range(0, len(self.envs)):
print_results += f"; {results[i]}"
self.csvs_dict["global"].write(print_results + "\n")
print(f" {p.environment.id}; {pop.best_genome.key}; {results}")
def create_csv(self, original_env_id):
csv_file = f"{RESULTS_DIR}/POET_{self.test_name}_{original_env_id}.csv"
csv = open(csv_file, "w+")
header = "gen;original_env_id;genome_id"
for e in self.envs:
header +=";env" + str(e.id) + "_fit"
csv.write(header + "\n")
self.csvs_dict[original_env_id] = csv
class MULT_ENV_TEST:
def __init__(self, test_name, envs) -> None:
self.test_name = test_name
self.csvs_dict = {}
self.envs: List[EnvConfig] = envs
self.dir_path = f"{os.getcwd()}/../multiple_env_results/{test_name}"
self.create_csv("global")
def evaluate_gen(self, gen):
file_path = f"{self.dir_path}_pop_gen_{gen}.pkl"
pop: SGR = pkl.load(open(file_path, "rb"))
CustomGenome.robot_func = lambda self, net, config: generate_robot(net, pop.robot_size)
CustomGenome.substrate = morph_substrate(pop.robot_size, pop.substrate_type)
CustomGenome.robot_size = pop.robot_size
CustomGenome.spec_genotype_weight = 1
CustomGenome.spec_phenotype_weight = 2
winner = pop.pop.best_genome
        results = multithread_eval(pop, self.envs)
print_results = f"{gen}; 0; {winner.key}"
for i in range(0, len(self.envs)):
print_results += f"; {results[i]}"
self.csvs_dict["global"].write(print_results + "\n")
print(f" 0; {winner.key}; {results}")
def create_csv(self, original_env_id):
csv_file = f"{RESULTS_DIR}/MULT_ENV_{self.test_name}_{original_env_id}.csv"
csv = open(csv_file, "w+")
header = "gen;original_env_id;genome_id"
for e in self.envs:
header += ";env" + str(e.id) + "_fit"
csv.write(header + "\n")
self.csvs_dict[original_env_id] = csv
if __name__ == "__main__":
env0 = EnvConfig(0)
env1 = env0.create_child()
env1.mutate_barrier_h(.25)
env2 = env0.create_child()
env2.mutate_barrier_h(.25)
env3 = env1.create_child()
env3.mutate_barrier_h(.25)
env4 = env1.create_child()
env4.mutate_barrier_h(.25)
env5 = env2.create_child()
env5.mutate_barrier_h(.25)
env6 = env2.create_child()
env6.mutate_barrier_h(.25)
envs = [env0, env1, env2, env3, env4, env5, env6]
for dir, mult_env_file in zip(POET_DIRS, MULT_ENV_FILES):
print("initiating test on: ", dir)
p = POET_TEST(dir, envs)
for i in range(5, 201, 5):
print(i)
p.evaluate_gen(i)
print()
for f in p.csvs_dict.values():
f.close()
print("initiating test on: ", mult_env_file)
p = MULT_ENV_TEST(mult_env_file, envs)
for i in range(5, 201, 5):
print(i)
p.evaluate_gen(i)
print()
for f in p.csvs_dict.values():
f.close() | 5,858 | 32.672414 | 116 | py |
AACL-22 | AACL-22-main/utils/template_utils.py | import os
import pandas as pd
import os.path as path
import re
from pprint import pprint
import readtime
from jinja2 import Template
import numpy as np
from jinja2 import Template
from bs4 import BeautifulSoup
def read_json(path):
import json
with open(path) as json_file:
data = json.load(json_file)
return data
def get_arg_length(sentences):
import nltk
from nltk.tokenize import sent_tokenize
number_of_sentences = sent_tokenize(sentences)
return (len(number_of_sentences))
def wrap_paragraph(row):
return '<div class="container-box"><p span style="font-:normal">{}</p><div class="select-box">select</div></div>'.format(' '.join(row.split()))
def delete_keys_from_dict(dict_del, lst_keys):
for k in lst_keys:
try:
del dict_del[k]
except KeyError:
pass
for v in dict_del.values():
if isinstance(v, dict):
delete_keys_from_dict(v, lst_keys)
return dict_del
def shuffle_list(a_lis):
import random
random.seed(8)
return sorted(a_lis, key=lambda k: random.random())
def format_table(a_df):
from jinja2 import Environment, BaseLoader
col1 = a_df.columns[0]
col2 = a_df.columns[1]
part_1 = ' '.join(a_df[col1].tolist())
part_2 = ' '.join(a_df[col2].tolist())
html_table = '''<section class="row"> <div class="layout-one"> <h3>{{col_1}}</h3> <div> {{part_1}} </div></div><div class="layout-two"><h3>{{col_2}}</h3><div>{{part_2}}</div></div></section>'''
template = Environment(loader=BaseLoader()).from_string(html_table)
return template.render(col_1 = col1, col_2=col2, part_1= part_1, part_2 = part_2)
from bs4 import BeautifulSoup
def create_tuple(a_df):
return zip(a_df['argument'].tolist(), a_df['stance'].tolist())
def wrap(output_path_unlabeled, tupled_list):
soup = BeautifulSoup(open(output_path_unlabeled), 'html.parser')
for i, elem in enumerate(tupled_list):
for j, e in enumerate(elem):
matches =\
soup.find_all(lambda x: x.text == e[0])
for k, match in enumerate(matches):
target = match.parent.find("div", {"class": "select-box"})
# TODO 1, 2, 3
if e[1] == 'Con':
r = '1'
elif e[1] == "Pro":
r = '2'
elif e[1] == "Unknown":
r = '3'
else:
r = '4'
s = f'''<div class="container"><input type="radio" value="1-{r}" name="question-select_{str(i)+str(j)+str(k)}" class="answer_question" data-id-question="type3" required >Con</input>
<input type="radio" value="2-{r}" name="question-select_{str(i)+str(j)+str(k)}" class="answer_question" data-id-question="type3" required >Pro</input>
<input type="radio" value="3-{r}" name="question-select_{str(i)+str(j)+str(k)}" class="answer_question" data-id-question="type3" required >Unknown</input>
<input type="radio" value="4-{r}" name="question-select_{str(i)+str(j)+str(k)}" class="answer_question" data-id-question="type3" required >Neutral</input></div>'''
target.string = target.text.replace("select", s)
# fix the path
with open(output_path_unlabeled, "w") as file:
file.write(soup.prettify(formatter=None))
def create_argument(df, a_dict):
import numpy as np # Remove this for getting different results for each run
np.random.seed(123)
topic = df['topic'].tolist()[0]
df_pro = df[df['stance'] == 'Pro']
#import pdb; pdb.set_trace() # debugging starts here
rows = np.random.choice(df_pro.index.values, a_dict['arg_line_A']['pro'], replace=False)
chunk_a_1 = df_pro.loc[rows]
to_filter = chunk_a_1['index'].tolist()
updated_df = df[~df['index'].isin(to_filter)]
df_con = updated_df[updated_df['stance'] == 'Con']
rows = np.random.choice(df_con.index.values, a_dict['arg_line_A']['con'], replace=False)
chunk_a_2 = df_con.loc[rows]
to_filter = chunk_a_2['index'].tolist()
updated_df = updated_df[~updated_df['index'].isin(to_filter)]
arg_line_a = pd.concat([chunk_a_1, chunk_a_2], ignore_index=True)
arg_line_a = arg_line_a.sample(frac=1, random_state=0).reset_index(drop=True)
df_pro = updated_df[updated_df['stance'] == 'Pro']
rows = np.random.choice(df_pro.index.values, a_dict['arg_line_B']['pro'], replace=False)
chunk_b_1 = df_pro.loc[rows]
# Update the DF
to_filter_2 = chunk_b_1['index'].tolist()
updated_df = updated_df[~updated_df['index'].isin(to_filter_2)]
df_con = updated_df[updated_df['stance'] == 'Con']
rows = np.random.choice(df_con.index.values, a_dict['arg_line_B']['con'], replace=False)
chunk_b_2 = df_con.loc[rows]
# Update the DF
to_filter_2 = chunk_b_2['index'].tolist()
updated_df = updated_df[~updated_df['index'].isin(to_filter_2)]
arg_line_b = pd.concat([chunk_b_1, chunk_b_2], ignore_index=True)
arg_line_b = arg_line_b.sample(frac=1, random_state=0).reset_index(drop=True)
line_a = arg_line_a.argument.tolist()
line_b = arg_line_b.argument.tolist()
ert = readtime.of_text(' '.join([item for sublist in [line_a, line_b] for item in sublist]), wpm=260)
stances_line_a = arg_line_a.stance.tolist()
stances_line_b = arg_line_b.stance.tolist()
a = pd.DataFrame(line_a, columns=['A'])
b = pd.DataFrame(line_b, columns=['B'])
d = pd.concat([a, b], axis=1)
d['A'] = d['A'].apply(wrap_paragraph)
d['B'] = d['B'].apply(wrap_paragraph)
return format_table(d.iloc[np.random.permutation(len(d))]), topic, ert.seconds
def create_view_topic_stance(arg_data):
a_lis = [tuple([e[0],e[1]]) for e in arg_data]
final_string = []
for e in a_lis:
s = f'''<div class="container"> {e[1]}?:
<div style="margin: -2px 15px 14px">
<input type="radio" value="agree" name="personal_stance_{e[0]}" class="answer_question" data-id-question="agree" required >Agree</input>
<input type="radio" value="disagree" name="personal_stance_{e[0]}" class="answer_question" data-id-question="agree" required >Disagree</input>
<input type="radio" value="neutral" name="personal_stance_{e[0]}" class="answer_question" data-id-question="agree" required >Neutral</input>
<input type="radio" value="unknown" name="personal_stance_{e[0]}" class="answer_question" data-id-question="agree" required >Unknown</input>
</div>
</div>'''
final_string.append(s)
return " ".join(final_string)
def split_list(a_list):
l = []
half = len(a_list)//2
return a_list[:half], a_list[half:][:-1], a_list[-1]
def produce_template(topics, args, template_path, output_path):
template = Template(open(template_path).read())
idx = list(map(lambda x:x+1, list(range(0, len(topics)))))
arg_data = list(zip(idx, topics, args))
part_1, part_2, part_3 = split_list(arg_data)
with open(output_path, 'w') as f:
f.write(template.render(arg_data_0 = create_view_topic_stance(arg_data),
arg_data_1 = part_1,
arg_data_2 = part_2,
arg_data_3 = part_3))
def shuffle_df(df, n=1, axis=0):
df = df.copy()
for _ in range(n):
df.apply(np.random.shuffle, axis=axis)
return df
def create_setup(dic_list, arg_list, **kwargs):
read_time = []
args = []
topics = []
if kwargs.get('shuffling', True):
dic_list = shuffle_list(dic_list)
else:
pass
for x, y in zip(arg_list, dic_list):
try:
output, topic, ert = create_argument(x, y)
args.append(output)
topics.append(topic)
read_time.append(ert)
except ValueError:
pass
return topics, args
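# Hedged usage sketch (file names below are assumptions, not the project's real
# paths): create_setup pairs each argument DataFrame with a pro/con layout dict,
# and produce_template renders the resulting topics/arguments into the Jinja2
# HIT template.
def _example_build_hit(dic_list, arg_list):
    topics, args = create_setup(dic_list, arg_list, shuffling=True)
    produce_template(topics, args, "template.html", "annotation_task.html")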
def t_tuple(e):
l = []
for i in e:
l.append(abs(i['pro']-i['con']))
return tuple(l)
| 8,250 | 31.105058 | 198 | py |
AACL-22 | AACL-22-main/utils/utils.py | import pandas as pd
import numpy as np
from nltk.metrics.agreement import AnnotationTask
from nltk.metrics import interval_distance, binary_distance
import datetime
from matplotlib import pyplot as plt
import seaborn as sns
import operator
from subprocess import PIPE, run
import pathlib
sns.set_style("darkgrid")
def get_alpha(triplet):
# print('-> Krippendorff\'s alpha: {:.8f}'.format(AnnotationTask(triplet, distance = interval_distance).alpha()))
return AnnotationTask(triplet, distance = interval_distance).alpha()
def removekey(d, key):
r = dict(d)
del r[key]
return r
def transform_dict(a_dict):
keys = [str(e[0]) for e in list(a_dict.keys())]
values = list(a_dict.values())
return removekey(dict(zip(keys, values)), 'False')
def get_summary_choice(data):
return pd.DataFrame(data).fillna(0).T
def conf_mat(y_true, y_pred, title):
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_true, y_pred)
ax= plt.subplot()
sns.heatmap(cm, annot=True, ax = ax, fmt='d', cmap="OrRd") #annot=True to annotate cells
# labels, title and ticks
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.set_title(title)
ax.xaxis.set_ticklabels(['Arg A', 'Arg B', 'None'])
return ax.yaxis.set_ticklabels(['Arg A', 'Arg B', 'None'])
def get_alpha(col1, col2, col3, batch):
triplet = list(zip(batch.id_worker, [col1] * batch.shape[0], batch[col1])) + list(zip(batch.id_worker, [col2] * batch.shape[0], batch[col2])) + list(zip(batch.id_worker, [col3] * batch.shape[0], batch[col3]))
return AnnotationTask(triplet, distance = interval_distance).alpha()
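# Hedged illustrative sketch (not part of the original pipeline): AnnotationTask
# expects (coder, item, label) triplets, which is what the function above builds
# from three answer columns of a batch; note that this second get_alpha
# definition shadows the single-argument one defined earlier in this module.
def _example_alpha_triplets():
    toy_triplets = [
        ("w1", "q1", 1), ("w2", "q1", 1), ("w3", "q1", 2),
        ("w1", "q2", 3), ("w2", "q2", 3), ("w3", "q2", 3),
    ]
    return AnnotationTask(toy_triplets, distance=interval_distance).alpha()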
def get_barchart(summary, title, caption, batch):
ax= summary.plot(kind='bar', rot=0)
plt.title(label=title)
plt.figtext(0.5, 0.01, caption, wrap=True, horizontalalignment='center', fontsize=12)
for p in ax.patches:
percentage = '{:.1f}%'.format(100 * p.get_height()/float(len(batch)))
x = p.get_x() + p.get_width()
y = p.get_height()
ax.annotate(percentage, (x, y),ha='center')
plt.show()
def validate_time(arr):
return 'approved' if arr[ arr >= 180 ].size >= 4 else 'internal reject' if arr[ arr >= 90 ].size >= 4 else 'rejected'
def validate_answers(a_df, interval_1, interval_2):
a_lis = []
for e in a_df.index:
a_lis.append(validate_time(a_df.iloc[e].to_numpy()[interval_1:interval_2]))
#assign
a_df['quality_control'] = a_lis
return a_df
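# Hedged worked example of the rule above: 'approved' needs at least four
# readings of 180 seconds or more, 'internal reject' needs at least four of 90
# seconds or more, anything else is 'rejected'. The timings below are made up.
def _example_validate_time():
    return (
        validate_time(np.array([200, 190, 185, 181, 20])),  # 'approved'
        validate_time(np.array([100, 95, 92, 91, 20])),     # 'internal reject'
        validate_time(np.array([40, 30, 20, 10, 5])),       # 'rejected'
    )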
def transform_one_hot_df(a_df, a_lis_of_cols, col_name):
age_groups_df = a_df[a_lis_of_cols]
age_groups_df = age_groups_df.set_index('id_worker')
age_groups_df = age_groups_df.dot(age_groups_df.columns).to_frame(col_name).reset_index()
age_groups_df[col_name] = age_groups_df[col_name].apply(lambda x : ''.join(x.split('.')[-1:]))
return age_groups_df
def read_json(path):
import json
with open(path) as json_file:
data = json.load(json_file)
return data
def get_mace_competence(a_df, hit_id):
pd.options.mode.chained_assignment = None # default='warn'
# Section 1
part_1 = a_df[['id_worker', 'question1_1_1.arg_a', 'question1_1_1.arg_b', 'question1_1_1.none']]
part_2 = a_df[['id_worker', 'question1_1_2.arg_a', 'question1_1_2.arg_b', 'question1_1_2.none']]
# Section 2
part_3 = a_df[['id_worker', 'question1_1_2.arg_a', 'question1_1_2.arg_b', 'question1_1_2.none']]
#Section 3
# 1. Which text has more pro stances (paragraphs that agree with the topic)?
part_4 = a_df[['id_worker', 'question2_1_3.val1', 'question2_1_3.val2', 'question2_1_3.val3']]
# 2. Which text has more con stances (paragraphs that disagree with the topic)?
part_5 = a_df[['id_worker', 'question2_2_3.val1', 'question2_2_3.val2', 'question2_2_3.val3']]
# 3. Which text is more one-sided?
part_6 = a_df[['id_worker', 'question2_3_3.val1', 'question2_3_3.val2', 'question2_3_3.val3']]
# 4. How sure you are?
part_7 = a_df[['id_worker', 'question2_4_3.val1', 'question2_4_3.val2', 'question2_4_3.val3']]
#section 4
# 1. Which text has more pro stances (paragraphs that agree with the topic)?
part_8 = a_df[['id_worker', 'question3_3_4.val1', 'question3_3_4.val2', 'question3_3_4.val3']]
# 2. Which text has more con stances (paragraphs that disagree with the topic)?
#section 5
# 1. We believe that A is more one-sided, are you agree?
part_9 = a_df[['id_worker', 'question1_1_repeatedLabeled1.no',
'question1_1_repeatedLabeled1.none', 'question1_1_repeatedLabeled1.yes']]
# 2. How sure you are?
part_10 = a_df[['id_worker', 'question1_2_repeatedLabeled1.val1',
'question1_2_repeatedLabeled1.val2', 'question1_2_repeatedLabeled1.val3']]
mace_data_format_path = f"../data/crowdsourced/mace_temp/{hit_id}_mace_data.csv"
competence_path = "../scripts/competence"
mace_data = pd.concat([part_1, part_2, part_3, part_4,
part_5, part_6, part_7, part_8,
part_9, part_10], axis=1).T.drop_duplicates()
mace_data = mace_data.rename(columns=mace_data.iloc[0]).drop(mace_data.index[0])
mace_cols = mace_data.columns
mace_data.to_csv(mace_data_format_path, index=False, header=False)
command = ['java', '-jar', '../MACE.jar', mace_data_format_path]
result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True)
mace_competence = pd.DataFrame({"id_worker":mace_cols,
"mace_competence":list(np.loadtxt("../scripts/competence"))})
pathlib.Path("../scripts/competence").unlink()
pathlib.Path("../scripts/prediction").unlink()
time = a_df[["id_worker", "start_time",
"submit_time", "time_elapsed_1",
"time_elapsed_2", "time_elapsed_3",
"time_elapsed_id_4", "time_elapsed_last"]]
time['start_time'] = pd.to_datetime(time['start_time'], infer_datetime_format=True)
time['submit_time'] = pd.to_datetime(time['submit_time'], infer_datetime_format=True)
time['δ_minutes'] = (time['submit_time'] - time['start_time']).astype("timedelta64[m]")
output = pd.concat([time, mace_competence], axis=1).T.drop_duplicates().T
output = output.drop(['start_time', 'submit_time'], axis=1)
return output
def plot_mace_time_corr(a_df):
import seaborn as sns
corr = a_df[['mace_competence', 'δ_minutes']].astype(float).corr()
heatmap =sns.heatmap(corr, annot = True, fmt='.2g',cmap= 'coolwarm')
return heatmap.set_title('Correlation Heatmap', fontdict={'fontsize':18}, pad=12);
def get_krippendorff(a_df):
from simpledorff import calculate_krippendorffs_alpha_for_df
a_lis = list(a_df.columns)
data = transform_one_hot_df(a_df, a_lis,'annotation')
data['document_id'] = a_lis[1].split('.')[0]
return calculate_krippendorffs_alpha_for_df(data,
experiment_col='document_id',
annotator_col='id_worker',
class_col='annotation')
def create_format_for_quica(a_df):
a_df = a_df.T
a_df.rename(columns=a_df.iloc[0], inplace = True)
a_df.drop(a_df.index[0], inplace = True)
a_df = a_df*1
data = a_df.values.T
dataframe = pd.DataFrame({
"coder_0" : data[0].tolist(),
"coder_1" : data[1].tolist(),
"coder_2" : data[2].tolist(),
"coder_3" : data[3].tolist(),
"coder_4" : data[4].tolist(),
"coder_5" : data[5].tolist(),
"coder_6" : data[6].tolist(),
"coder_7" : data[7].tolist(),
"coder_8" : data[8].tolist()})
return dataframe
| 7,908 | 32.231092 | 212 | py |
NFLPlayPrediction | NFLPlayPrediction-master/main.py | import random
from machine_learning.classification import compare_classification_parameters
from machine_learning.neural_network_prediction import neural_network_prediction
from machine_learning.regression import compute_regression_results
from preprocessing.analysis import apply_pca, apply_kernel_pca, apply_anova_f_value_test, \
apply_variance_threshold_selection, plot_progress_measure
from preprocessing.features import extract_features
random.seed(0) # keep seed fixed for reproducibility
def use_classification(data, config, target_name='success'):
compare_classification_parameters(data['categorical_features'], data[target_name], config)
def use_regression(data, config, target_name='progress'):
compute_regression_results(data['categorical_features'], data[target_name], target_name, config)
#features, labels, yards, progress = get_team_features("NO", features, labels, "team")
#compute_regression_results(features, yards, "./regression_results_team_NO_yards")
#compute_regression_results(features, progress, "./regression_results_team_NO_progress")
def use_neural_networks(data, config, measure='progress', layer_type='tanh'):
neural_network_prediction(data=data,
config=config,
team='all',
measure=measure,
load_previous=True,
layer_type=layer_type)
def __main__():
config = {
'start_year': 2009,
'end_year': 2014,
'predict_yards': True,
'predict_progress': True,
'predict_success': True,
'prediction_method_success': 'classification',
'prediction_method_yards': 'regression',
'prediction_method_progress': 'regression',
'use_neural_networks': True,
'neural_net_config': {
'k_fold': 5,
'epochs': [1],
'hidden_layers': [1],
'hidden_units': [1],
'load_previous': True,
'tanh': True,
'sigmoid': True,
'linear': True
},
'grid_search': False
}
print config
print 'getting data'
data = extract_features(start_year=config['start_year'], end_year=config['end_year'])
print 'finished getting data'
print 'starting prediction'
for measure in ['success', 'yards', 'progress']:
if config['predict_%s' % measure]:
print 'predicting %s measure' % measure
if config['prediction_method_%s' % measure] == 'regression':
use_regression(data, config, measure)
else:
use_classification(data, config, measure)
if config['use_neural_networks']:
for layer_type in ['tanh', 'sigmoid', 'linear']:
if config['neural_net_config'][layer_type]:
use_neural_networks(data, config, measure=measure, layer_type=layer_type)
    apply_pca(data['categorical_features'], data['success'])
apply_kernel_pca(data['categorical_features'], data['success'])
apply_anova_f_value_test(data['categorical_features'], data['success'], data['encoder'])
apply_variance_threshold_selection(data['categorical_features'], data['success'], data['encoder'])
plot_progress_measure()
__main__()
| 3,326 | 36.382022 | 102 | py |
NFLPlayPrediction | NFLPlayPrediction-master/postprocessing/evaluate.py | from __future__ import division
import math
import os
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
from sklearn import tree
from sklearn.metrics import confusion_matrix as confusion_matrix_func
from sklearn.model_selection import KFold
def predict_superbowl(encoder, classifier):
# Predict result for play
"""Superbowl 49 example: The Seahawks decided to pass the football from the 1 yard line, ending in an interception.
We let the estimator predict this specific play to judge the quality of the actual play call by Seattle's coaches.
"""
for p in [0, 1]:
for side in ['left', 'middle', 'right']:
X = defaultdict(float)
X['team'] = "SEA"
X['opponent'] = "NE"
X['time'] = 26
X['position'] = 1
X['half'] = 2
X['togo'] = 1
X['shotgun'] = 1
X['pass'] = p
if p == 1:
X['passlen'] = 'short'
X['side'] = side
X['qbrun'] = 0
X['down'] = 2
X = encoder.transform(X)
y_pred = classifier.predict(X)
print p, side, y_pred
return y_pred
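# Hedged illustrative wiring (assumptions: run from the repository root so that
# preprocessing.features is importable, `data` is the dict returned by
# extract_features, and `classifier` is any unfitted sklearn classifier).
def _example_predict_superbowl(data, classifier):
    from preprocessing.features import encode_categorical_features
    features, encoder = encode_categorical_features(data['features'], sparse=False)
    classifier.fit(features, data['success'])
    return predict_superbowl(encoder, classifier)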
# Evaluate classifier
def classifier_evaluate(classifier, features, labels, k=5):
confusion_matrix = [[0, 0], [0, 0]]
k_fold = KFold(len(labels), n_folds=k)
for train_index, test_index in k_fold:
X_train, X_test = features[train_index], features[test_index]
y_train, y_test = labels[train_index], labels[test_index]
y_pred = classifier.fit(X_train, y_train).predict(X_test)
confusion_matrix = confusion_matrix + confusion_matrix_func(y_test, y_pred)
return confusion_matrix
def get_stats_from_confusion_matrix(confusion_matrix):
print confusion_matrix
recall = 0 if confusion_matrix[1][1] == 0 else confusion_matrix[1][1] / \
(confusion_matrix[1][1] + confusion_matrix[1][0])
precision = 0 if confusion_matrix[1][1] == 0 else confusion_matrix[1][1] / \
(confusion_matrix[1][1] + confusion_matrix[0][1])
accuracy = 0 if confusion_matrix[1][1] == 0 and confusion_matrix[0][0] == 0 else \
(confusion_matrix[0][0] + confusion_matrix[1][1]) / \
(confusion_matrix[0][0] + confusion_matrix[0][1] + confusion_matrix[1][0] + confusion_matrix[1][1])
f1 = 0 if precision == 0 or recall == 0 else (2 * precision * recall) / (precision + recall)
return recall, precision, accuracy, f1
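# Hedged worked example of the formulas above with a made-up 2x2 matrix
# [[TN, FP], [FN, TP]] = [[50, 10], [5, 35]]: recall = 35/40, precision = 35/45,
# accuracy = 85/100, and F1 is the harmonic mean of precision and recall.
def _example_confusion_stats():
    return get_stats_from_confusion_matrix([[50, 10], [5, 35]])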
# Evaluate classifier and return recall, precision, accuracy
def classifier_evaluate_percents(classifier, features, labels, k=5):
confusion_matrix = [[0, 0], [0, 0]]
k_fold = KFold(n_splits=k)
for train_index, test_index in k_fold.split(features):
X_train, X_test = features[train_index], features[test_index]
y_train, y_test = labels[train_index], labels[test_index]
y_pred = classifier.fit(X_train, y_train).predict(X_test)
confusion_matrix = confusion_matrix + confusion_matrix_func(y_test, y_pred)
recall, precision, accuracy, f1 = get_stats_from_confusion_matrix(confusion_matrix)
return recall, precision, accuracy, f1
# Evaluate regression estimator
def regression_evaluate(classifier, features, labels, k=5):
abs_diffs = []
mse_diffs = []
    k_fold = KFold(n_splits=k)
    for train_index, test_index in k_fold.split(features):
X_train, X_test = features[train_index], features[test_index]
y_train, y_test = labels[train_index], labels[test_index]
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
for idx in range(len(y_pred)):
d = abs(y_pred[idx] - y_test[idx])
abs_diffs.append(d)
d = d * d
mse_diffs.append(d)
avg_abs_diff = sum(abs_diffs) / len(abs_diffs)
avg_mse_diff = math.sqrt(sum(mse_diffs) / len(mse_diffs))
print "MAE:",
print("%.4f" % avg_abs_diff),
print '/ RMSE:',
print ("%.4f" % avg_mse_diff)
return abs_diffs, mse_diffs, avg_abs_diff, avg_mse_diff
# Create a plot of the confusion matrix
def plot_confusion_matrix(cm):
plt = create_confusion_matrix_plot(cm)
plt.show()
def save_confusion_matrix(confusion_matrix, file_path):
plt = create_confusion_matrix_plot(confusion_matrix)
plt.savefig(file_path)
def create_confusion_matrix_plot(confusion_matrix):
confusion_matrix_normalized = confusion_matrix.astype('float') / confusion_matrix.sum(axis=1)[:, np.newaxis] * 100.0
plt.matshow(confusion_matrix_normalized)
width = len(confusion_matrix)
height = len(confusion_matrix[0])
for x in xrange(width):
for y in xrange(height):
plt.gca().annotate("{:5.2f} %".format(confusion_matrix_normalized[x][y]), xy=(y, x),
horizontalalignment='center',
verticalalignment='center')
tick_labels = ['Fail', 'Success']
plt.xticks(range(width), tick_labels)
plt.yticks(range(height), tick_labels)
plt.title('Normalized confusion matrix')
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
return plt
# Create a plot of the decision tree
def plot_tree(classifier, feature_names):
tree.export_graphviz(classifier, out_file='tree.dot', class_names=['Fail', 'Success'], feature_names=feature_names)
os.system("dot -Tpng tree.dot -o tree.png")
os.system("tree.png")
| 5,550 | 36.255034 | 120 | py |
NFLPlayPrediction | NFLPlayPrediction-master/postprocessing/__init__.py | from evaluate import *
| 23 | 11 | 22 | py |
NFLPlayPrediction | NFLPlayPrediction-master/machine_learning/classification.py | from __future__ import division
from collections import Counter
from random import random
from sklearn import tree
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.neighbors.nearest_centroid import NearestCentroid
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from postprocessing.evaluate import classifier_evaluate_percents
'''
estimator = the SVM you wish to use to classify the data
features = a (sample size) x (features) array containing the feature vectors of the data
true_labels = an array of the correct classification for each of the sample feature vectors
kfold = the number of folds to use in the cross validation. Defaults to 5 fold if not specified.
Returns (mean, standard deviation) of the provided estimator's accuracy using kfold validation,
and utilizes all available CPUs for the training and validation.
'''
result_file_name = './results/classifier_results.txt'
def write_result_stats_to_file(file_name, estimator_name, recall, precision, accuracy, f1):
output = open(file_name, 'a')
print >> output, "**********************************"
print >> output, estimator_name
print >> output, "Recall:", recall * 100, '%'
print >> output, "Precision:", precision * 100, '%'
print >> output, "Accuracy:", accuracy * 100, '%'
print >> output, "F1:", f1 * 100, '%'
print >> output, "**********************************"
output.flush()
output.close()
def write_search_results_to_file(file_name, estimator_name, search):
output = open(file_name, 'a')
print >> output, estimator_name
print >> output, search.best_estimator_
print >> output, ""
print >> output, "Parameters:"
print >> output, search.best_params_
print >> output, ""
print >> output, "Score:"
print >> output, search.best_score_
print >> output, "Grid Scores:"
    print >> output, search.cv_results_
output.close()
output.flush()
def test_classifier(estimator, features, true_labels, kfold=5):
scores = cross_val_score(estimator, features, true_labels, cv=kfold, n_jobs=-2, verbose=1)
return scores.mean(), scores.std()
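# Hedged usage sketch (the estimator choice below is an assumption, not the
# project's tuned model): cross-validate one classifier on the encoded feature
# matrix and success labels produced by preprocessing.features.extract_features.
def _example_test_classifier(features, true_labels):
    estimator = tree.DecisionTreeClassifier(class_weight='balanced', max_depth=10)
    return test_classifier(estimator, features, true_labels, kfold=3)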
def under_sample(vector, true_labels):
counts = Counter(true_labels)
total = len(true_labels)
proportion = {}
for label in counts:
proportion[label] = 0.00 + counts[label] / total
min_prop = min(proportion.values())
weights = {}
for label in counts:
weights[label] = min_prop / proportion[label]
balanced_dataset = []
new_labels = []
for idx, label in enumerate(true_labels):
if random() < weights[label]:
new_labels.append(label)
balanced_dataset.append(vector[idx])
print Counter(new_labels)
return balanced_dataset, new_labels
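# Hedged toy example of the random under-sampling above: a 6:3 label split is
# thinned towards balance; exact counts vary between runs because each
# majority-class row is kept with probability min_prop / proportion[label].
def _example_under_sample():
    toy_vectors = [[i] for i in range(9)]
    toy_labels = [0, 0, 0, 0, 0, 0, 1, 1, 1]
    return under_sample(toy_vectors, toy_labels)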
def compare_classification_parameters(features, labels, config):
#TODO Consider replacing the above with FeatureHasher for faster computation?
#linsvm = LinearSVC(C=0.03125)
#linsvm.fit(vectorized_features, labels)
#rbfsvm = SVC(C=2048, kernel='rbf', gamma=pow(2, -17))
#rbfsvm.fit(vectorized_features, labels)
estimators = [
#(LinearDiscriminantAnalysis(), "SVD LDA"),
(LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto'), "LSQR LDA"),
(LinearDiscriminantAnalysis(solver='eigen', shrinkage='auto'), "Eigenvalue Decomposition LDA"),
(SGDClassifier(), "SGDC"),
(tree.DecisionTreeClassifier(class_weight='balanced', max_depth=10), "Decision Tree"),
(NearestCentroid(), "NearestCentroid"),
#(linsvm, "Linear SVM"),
#(rbfsvm, "RBF SVM, C=2048, Gamma= 2^-17")
]
for (estimator, estimator_name) in estimators:
print 'using %s' % estimator_name
(recall, precision, accuracy, f1) = classifier_evaluate_percents(estimator, features, labels)
write_result_stats_to_file(result_file_name, estimator_name, recall, precision, accuracy, f1)
if config['grid_search']:
linear_svm_params = {'C': [pow(2, x) for x in range(-5, 15, 2)]}
search = GridSearchCV(LinearSVC(class_weight='balanced'), linear_svm_params, cv=3, n_jobs=-1, verbose=1)
search.fit(features, labels)
write_search_results_to_file(result_file_name, "Linear SVM Best Estimator", search)
rbf_parameters = {
'C': [pow(2, x) for x in range(-5, 17, 2)], # Possible error weights for the SVM.
'gamma': [pow(2, x) for x in range(-17, 4, 2)] # Possible gamma values for the SVM.
}
search = GridSearchCV(SVC(class_weight='balanced'), rbf_parameters, cv=3, n_jobs=-1, verbose=1)
search.fit(features, labels)
write_search_results_to_file(result_file_name, "RBF SVM Best Estimator", search)
| 4,865 | 38.560976 | 112 | py |
NFLPlayPrediction | NFLPlayPrediction-master/machine_learning/neural_network_prediction.py | import os
import pickle
import numpy as np
from pybrain.datasets import SupervisedDataSet, ClassificationDataSet
from pybrain.structure import SigmoidLayer, LinearLayer
from pybrain.structure import TanhLayer
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork
from sklearn.metrics import confusion_matrix
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.model_selection import KFold
from postprocessing.evaluate import save_confusion_matrix
def neural_network_prediction(data, config, measure, team='all',
layer_type='tanh', load_previous=True):
regression_task = config['prediction_method_%s' % measure] == 'regression'
# k-fold cross-validation
k_fold = KFold(n_splits=config['neural_net_config']['k_fold'])
model_directory = 'machine_learning/trained_models/' + team
if not os.path.isdir(model_directory):
os.mkdir(model_directory)
suffix = '%s_%s_%s_%s' % (config['neural_net_config']['epochs'],
config['neural_net_config']['hidden_layers'],
config['neural_net_config']['hidden_units'],
layer_type)
result_file_name = "results/neural_networks_" + measure + suffix + ".txt"
output_file = open(result_file_name, "w")
if regression_task:
output_file.write('epochs & hidden layers & hidden units & hidden class & RMSE(all) & MAE(all)\\\\ \n')
else:
output_file.write(
'epochs & hidden layers & hidden units & hidden class & accuracy & precision & recall \\\\ \n')
hidden_class = SigmoidLayer
    if layer_type == 'tanh':
        hidden_class = TanhLayer
    if layer_type == 'linear':
hidden_class = LinearLayer
for number_of_epochs in config['neural_net_config']['epochs']:
for number_of_hidden_layers in config['neural_net_config']['hidden_layers']:
for number_of_hidden_units in config['neural_net_config']['hidden_units']:
configuration = {'epochs': number_of_epochs,
'layers': number_of_hidden_layers,
'units': number_of_hidden_units,
'class': layer_type}
predictions = np.array([])
# try:
for i in range(1):
cross_val_index = 1
for train_index, test_index in k_fold.split(data['categorical_features']):
train_x, test_x = np.array(data['categorical_features'][train_index]), np.array(data['categorical_features'][test_index])
train_y, test_y = np.array(data[measure][train_index]), np.array(data[measure][test_index])
ds, number_of_features = initialize_dataset(regression_task, train_x, train_y)
file_name = model_directory + '/' + measure + '_' + layer_type + \
'_epochs=%d_layers=%d_units=%d_part=%d.pickle' % \
(number_of_epochs, number_of_hidden_layers, number_of_hidden_units, cross_val_index)
net = build_and_train_network(load_previous, file_name, ds, number_of_features,
number_of_epochs, number_of_hidden_layers, number_of_hidden_units,
hidden_class)
predictions = np.concatenate((predictions, predict(net, test_x)))
cross_val_index += 1
evaluate_accuracy(predictions, data[measure], regression_task, result_file_name, output_file, configuration)
# except:
# pass
output_file.close()
def initialize_dataset(regression_task, train_x, train_y):
number_of_features = train_x.shape[1]
if regression_task:
ds = SupervisedDataSet(number_of_features, 1)
else:
ds = ClassificationDataSet(number_of_features, nb_classes=2, class_labels=['no success', '1st down or TD'])
ds.setField('input', train_x)
ds.setField('target', train_y.reshape((len(train_y), 1)))
return ds, number_of_features
def build_and_train_network(load_previous, file_name, ds, number_of_features,
number_of_epochs, number_of_hidden_layers, number_of_hidden_units, hidden_class):
if load_previous:
print 'trying to load previously trained network'
try:
with open(file_name, 'r') as net_file:
net = pickle.load(net_file)
load_failed = False
print 'succeed to load previously trained network'
except:
load_failed = True
print 'failed to load previously trained network'
if (not load_previous) or load_failed:
print 'creating new network'
# define number of units per layer
layers = [number_of_features]
layers.extend([number_of_hidden_units] * number_of_hidden_layers)
layers.append(1)
# Build Neural Network
net = buildNetwork(
*layers,
bias=True,
hiddenclass=hidden_class,
outclass=LinearLayer
)
trainer = BackpropTrainer(net, ds, learningrate=0.01, lrdecay=1.0, momentum=0.0, weightdecay=0.0, verbose=True)
trainer.trainUntilConvergence(maxEpochs=number_of_epochs)
print 'trained new network'
with open(file_name, 'w') as net_file:
pickle.dump(net, net_file)
print 'saved new network to file ' + file_name
return net
def predict(net, feature_vectors):
predictions = []
for x in feature_vectors:
predictions.append(net.activate(x)[0])
return np.array(predictions)
def evaluate_accuracy(predictions, labels, regression_task, output_file_name, output_file, configuration):
if regression_task:
evaluate_regression(predictions, labels, output_file, configuration)
else:
evaluate_classification(predictions, labels, output_file_name, output_file, configuration)
def evaluate_classification(predictions, labels, output_file_name, output_file, configuration):
print labels[:10], [0 if p < 0.5 else 1 for p in predictions[:10]]
cm = confusion_matrix(labels, [0 if p < 0.5 else 1 for p in predictions])
print cm, cm[0][0], cm[0][1], cm[1][0], cm[1][1], float(cm[0][0] + cm[0][1] + cm[1][0] + cm[1][1])
recall = float(cm[1][1]) / float(cm[1][1] + cm[1][0]) \
if cm[1][1] + cm[1][0] > 0 else 0
precision = float(cm[1][1]) / float(cm[1][1] + cm[0][1]) \
if cm[1][1] + cm[0][1] > 0 else 0
accuracy = float(cm[0][0] + cm[1][1]) / float(cm[0][0] + cm[0][1] + cm[1][0] + cm[1][1]) \
if cm[0][0] + cm[0][1] + cm[1][0] + cm[1][1] > 0 else 0
f1 = float(2 * precision * recall) / float(precision + recall) if precision + recall > 0 else 0
save_confusion_matrix(cm, output_file_name[:-7] + '.png')
# format output for LaTeX
output_file.write('%d & %d & %d & %s & %f & %f & %f & %f \\\\ \n' %
(configuration['epochs'], configuration['layers'], configuration['units'], configuration['class'],
accuracy, precision, recall, f1))
def evaluate_regression(predictions, labels, output_file, configuration):
# format output for LaTeX
output_file.write('%d & %d & %d & %s & %f & %f \\\\ \n' %
(configuration['epochs'], configuration['layers'], configuration['units'], configuration['class'],
mean_squared_error(labels, predictions) ** 0.5, mean_absolute_error(labels, predictions)))
| 7,740 | 42.982955 | 145 | py |
NFLPlayPrediction | NFLPlayPrediction-master/machine_learning/regression.py | from __future__ import division
from sklearn import tree
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVR
from postprocessing.evaluate import regression_evaluate
'''
estimator = the SVM you wish to use to classify the data
features = a (sample size) x (features) array containing the feature vectors of the data
targets = an array of the correct classification for each of the sample feature vectors
k_fold = the number of folds to use in the cross validation. Defaults to 5 fold if not specified.
Returns (mean, standard deviation) of the provided estimator's accuracy using kfold validation,
and utilizes all available CPUs for the training and validation.
'''
result_file_name = './results/regression_results_%s.txt'
pkl_file_name = './results/regression_%s.pkl'
def write_result_stats_to_file(file_name, target_name, estimator_name, average_difference, average_mse_difference):
output = open(file_name % target_name, 'a')
print >> output, "**********************************"
print >> output, estimator_name
print >> output, "Average Difference from Goal: ", average_difference
print >> output, "Average MSE from Goal: ", average_mse_difference
print >> output, "**********************************"
output.flush()
output.close()
def grid_search_rbf_parameters(vectorized_features, targets, file_name, target_name, estimator_name,
c_values, gamma_values, k_fold=5, scoring="mean_squared_error"):
output = open(file_name % target_name, 'a')
print >> output, "**********************************"
print >> output, "Searching for SVR RBF Parameters"
rbf_parameters = {'C': c_values, 'gamma': gamma_values}
search = GridSearchCV(SVR(), rbf_parameters, cv=k_fold, n_jobs=-1, verbose=1, scoring=scoring)
search.fit(vectorized_features, targets)
print >> output, "%s Best Estimator:" % estimator_name
print >> output, search.best_estimator_
print >> output, "Best Parameters: ", search.best_params_
print >> output, "Mean-Squared-Error Score: ", search.best_score_
print >> output, "Grid Scores:"
print >> output, search.cv_results_
output.flush()
output.close()
def compute_regression_results(features, targets, target_name, config):
vectorized_features = features
#TODO Consider replacing the above with FeatureHasher for faster computation?
estimators = [
(LinearRegression(normalize=True), "LinearRegression"),
#(SVR(C=128, kernel='rbf', gamma=pow(2, -1)), "RBF SVR, C=128, Gamma= 2^-1"),
(tree.DecisionTreeRegressor(max_depth=10), 'Decision Tree')
]
for (estimator, estimator_name) in estimators:
print 'using %s' % estimator_name
abs_diff, mse_diff, avg_diff, avg_mse_diff = regression_evaluate(estimator, vectorized_features, targets)
write_result_stats_to_file(result_file_name, target_name, estimator_name, avg_diff, avg_mse_diff)
#joblib.dump(estimator, pkl_file_name % estimator_name)
if config['grid_search']:
grid_search_rbf_parameters(vectorized_features=vectorized_features, targets=targets, file_name=result_file_name,
target_name=target_name, estimator_name='RBF SVR',
c_values=[pow(2, x) for x in range(-5, 17, 2)],
gamma_values=[pow(2, x) for x in range(-17, 9, 2)],
k_fold=5, scoring="mean_squared_error")
| 3,594 | 46.302632 | 120 | py |
NFLPlayPrediction | NFLPlayPrediction-master/machine_learning/__init__.py | from classification import *
from neural_network_prediction import *
from regression import * | 93 | 30.333333 | 39 | py |
NFLPlayPrediction | NFLPlayPrediction-master/machine_learning/trained_models/__init__.py | 0 | 0 | 0 | py |
|
NFLPlayPrediction | NFLPlayPrediction-master/preprocessing/features.py | # Load games
from __future__ import division
import nflgame
# Extract features
import re
from collections import defaultdict
import numpy as np
from sklearn.feature_extraction import DictVectorizer
def extract_features(start_year, end_year):
play_features = []
success_labels = []
yard_labels = []
progress_labels = []
success_cnt = 0
for year in range(start_year, end_year + 1):
# split into individual weeks in order to avoid having to load
# large chunks of data at once
for week in range(1, 18):
games = nflgame.games(year, week=week)
for play in nflgame.combine_plays(games):
features = defaultdict(float)
success = 0
yards = 0
progress = 0
desc = ''
# TODO: include sacks? probably not since we can't assign them to any play option
# TODO: Additonally maybe even booth review, official timeout?
# TODO: Fumble plays should count as if Fumble didn't happen?
# TODO: plays with declined penalties should be counted ((4:52) A.Foster right tackle to HOU 43 for 13 yards (J.Cyprien). Penalty on JAC-S.Marks, Defensive Offside, declined.)
# TODO: plays with accepted penalties that do not nullify the play should be counted (keyword: No Play)
# TODO: error with group when using 2013
# TODO: Should we count Def. Pass Interference? Def. Holding?
if (play.note == None or play.note == 'TD' or play.note == 'INT') \
and (' punt' not in play.desc) \
and ('END ' != play.desc[:4]) \
and ('End ' != play.desc[:4]) \
and ('Two-Minute Warning' not in play.desc) \
and ('spiked the ball to stop the clock' not in play.desc) \
and ('kneels to ' not in play.desc) \
and ('Delay of Game' not in play.desc) \
and (play.time is not None) \
and ('Penalty on' not in play.desc) \
and ('Delay of Game' not in play.desc) \
and ('sacked at' not in play.desc) \
and ('Punt formation' not in play.desc) \
and ('Direct snap to' not in play.desc) \
and ('Aborted' not in play.desc) \
and ('temporary suspension of play' not in play.desc) \
and ('TWO-POINT CONVERSION ATTEMPT' not in play.desc) \
and ('warned for substitution infraction' not in play.desc) \
and ('no play run - clock started' not in play.desc) \
and ('challenged the first down ruling' not in play.desc) \
and ('*** play under review ***' not in play.desc) \
and ('Direct Snap' not in play.desc) \
and ('Direct snap' not in play.desc):
features['team'] = play.team
if play.drive.game.away == play.team:
features['opponent'] = play.drive.game.home
else:
features['opponent'] = play.drive.game.away
timeclock = play.time.clock.split(':')
features['time'] = float(timeclock[0]) * 60 + float(timeclock[1])
if (play.time.qtr == 1) or (play.time.qtr == 3):
features['time'] += 15 * 60
if play.time.qtr <= 2:
features['half'] = 1
else:
features['half'] = 2
features['position'] = 50 - play.yardline.offset
features['down'] = play.down
features['togo'] = play.yards_togo
if 'Shotgun' in play.desc:
features['shotgun'] = 1
else:
features['shotgun'] = 0
full_desc = play.desc
full_desc = full_desc.replace('No. ', 'No.')
while re.search(r" [A-Z]\. ", full_desc) is not None:
match = re.search(r" [A-Z]\. ", full_desc).group(0)
full_desc = full_desc.replace(match, match.rstrip())
if re.search(r"[^\.] \(Shotgun\)", full_desc) is not None:
full_desc = full_desc.replace(" (Shotgun)", ". (Shotgun)")
full_desc = full_desc.replace('.(Shotgun)', '. (Shotgun)')
if re.search(r" a[st] QB for the \w+ ", full_desc) is not None:
match = re.search(r" a[st] QB for the \w+ ", full_desc).group(0)
full_desc = full_desc.replace(match, match.rstrip() + '. ')
if re.search(r"New QB.{0,20}[0-9]+ \w+?\.w+? ", full_desc) is not None:
match = re.search(r"New QB.{0,20}[0-9]+ \w+?\.w+? ", full_desc).group(0)
full_desc = full_desc.replace(match, match.rstrip() + '. ')
if re.search(r"New QB.{0,20}[0-9]+ \w+?[\.\, ] ?\w+? ", full_desc) is not None:
match = re.search(r"New QB.{0,20}[0-9]+ \w+?[\.\, ] ?\w+? ", full_desc).group(0)
full_desc = full_desc.replace(match, match.rstrip() + '. ')
if re.search(r"\#[0-9]+ Eligible ", full_desc) is not None:
match = re.search(r"\#[0-9]+ Eligible ", full_desc).group(0)
full_desc = full_desc.replace(match, match.rstrip() + '. ')
full_desc = full_desc.replace('New QB for Denver - No.6 - Brock Osweiler ',
'New QB for Denver - No.6 - B.Osweiler. ')
full_desc = full_desc.replace(' at QB ', ' at QB. ')
full_desc = full_desc.replace(' at qb ', ' at QB. ')
full_desc = full_desc.replace(' at Qb ', ' at QB. ')
full_desc = full_desc.replace(' in as QB for this play ', ' in as QB for this play. ')
full_desc = full_desc.replace(' in as QB ', ' in as QB. ')
full_desc = full_desc.replace(' in as quarterback ', ' in as QB. ')
full_desc = full_desc.replace(' in at Quarterback ', ' in as QB. ')
full_desc = full_desc.replace(' is now playing ', ' is now playing. ')
full_desc = full_desc.replace(' Seminole Formation ', ' ')
full_desc = full_desc.replace(' St. ', ' St.')
full_desc = full_desc.replace(' A.Randle El ', ' A.Randle ')
full_desc = full_desc.replace('Alex Smith ', 'A.Smith ')
if (re.search(r"New QB \#[0-9]+ \w+?\.\w+? ", full_desc) is not None):
match = re.search(r"New QB \#[0-9]+ \w+?\.\w+? ", full_desc).group(0)
full_desc = full_desc.replace(match, match.rstrip() + '. ')
if (re.search(r"took the reverse handoff from #[0-9]+", full_desc) is not None):
match = re.search(r"took the reverse handoff from #[0-9]+ \S+ ", full_desc).group(0)
full_desc = full_desc.replace(match, match.rstrip() + '. ')
sentences = full_desc.split('. ')
flag = 0
for i in range(len(sentences)):
if ('as eligible (Shotgun) ' in sentences[i]):
sentences[i] = re.sub(r"^.+ \(Shotgun\) ", "", sentences[i]).strip()
if (re.search(r' eligible \S+\.\S+ ', sentences[i]) is not None):
sentences[i] = re.sub(r"^.+ eligible ", "", sentences[i]).strip()
if ' as eligible' in sentences[i]:
continue
if 'was injured during the play' in sentences[i]:
continue
if 'lines up at ' in sentences[i]:
continue
if (re.search(r' at QB$', sentences[i]) is not None):
continue
if ' in at QB' in sentences[i]:
sentences[i] = re.sub(r"^.+ in at QB", "", sentences[i]).strip()
if ' report as eligible' in sentences[i]:
sentences[i] = re.sub(r"^.+ report as eligible", "", sentences[i]).strip()
if ('at QB' in sentences[i]) and ('at WR' in sentences[i]):
# QB and WR switched positions
continue
desc = sentences[i]
desc = re.sub(r"\(.+?\)", "", desc).strip()
desc = re.sub(r"\{.+?\}", "", desc).strip()
if ((re.search(r'to \w+$', desc) is not None) or (re.search(r'^\w+$', desc) is not None)) and (
i < len(sentences) - 1) and ('respotted to' not in desc):
desc = desc + '.' + re.sub(r"\(.+?\)", "", sentences[i + 1]).strip()
if ((i < len(sentences) - 1) and (sentences[i + 1][:3] == 'to ')):
desc = desc + '.' + re.sub(r"\(.+?\)", "", sentences[i + 1]).strip()
if ' at QB' in desc:
desc = ''
continue
if ' eligible' in desc:
desc = ''
continue
if 'Injury update: ' in desc:
desc = ''
continue
if desc.startswith('Reverse') == True:
desc = ''
continue
if desc.startswith('Direction change') == True:
desc = ''
continue
if desc.startswith('Direction Change') == True:
desc = ''
continue
# if (re.search(r'^\S+\.\S+ ', desc) is not None):
# if((' pass ' ) in desc) and ((
if ' pass ' in desc:
if (' short ' in desc) or (' deep' in desc):
if (' left' in desc) or (' right' in desc) or (' middle' in desc):
if (' incomplete ' in desc) or (' for ' in desc) or (' INTERCEPTED ' in desc):
break
else:
if (' up the middle' in desc) or (' left' in desc) or (' right' in desc):
if (' for ' in desc):
break
desc = ''
if desc == '':
continue
if 'incomplete' in desc:
features['pass'] = 1
rematch = re.search(r'incomplete \S+ \S+ to ', desc)
if rematch is None:
# ball just thrown away, no intended target -> ignore
                        continue
match = rematch.group(0).split()
features['passlen'] = match[1]
features['side'] = match[2]
else:
if 'no gain' in desc:
yards = 0
else:
if (play.note != 'INT') and ('INTERCEPTED' not in desc):
rematch = re.search(r'[-]?[0-9]+ yard\s?', desc)
if rematch is None:
print desc
print play.desc
match = rematch.group(0)
yards = float(match[:match.find(' ')])
if ' pass ' in desc:
features['pass'] = 1
match = re.search(r'pass \S+ \S+', desc).group(0).split()
if match[1] == 'to':
continue
features['passlen'] = match[1]
features['side'] = match[2]
else:
features['pass'] = 0
if 'up the middle' in desc:
features['side'] = 'middle'
else:
rematch = re.search(r'^\S+ (scrambles )?\S+ \S+', desc)
if rematch is None:
print desc
print play.desc
offset = 0
match = rematch.group(0).split()
if match[1] == 'scrambles':
features['qbrun'] = 1
offset = 1
if match[2 + offset] == "guard":
features['side'] = 'middle'
else:
features['side'] = match[1 + offset]
if (play.note == 'INT') or ('INTERCEPTED' in desc):
success = 0
else:
if (play.touchdown == True) and (' fumble' not in play.desc):
success = 1
success_cnt += 1
elif yards >= play.yards_togo:
success = 1
success_cnt += 1
# progress label calculation
if yards >= play.yards_togo:
# new first down reached
                    progress = 1
elif (play.down in [1, 2]) and (yards > 0):
progress = (float(yards) / float(play.yards_togo)) ** play.down
else:
# 3rd or 4th down attempt without conversion
progress = 0
if features['side'] not in ['middle', 'left', 'right']:
print play.desc
print
continue
play_features.append(features)
success_labels.append(success)
yard_labels.append(yards)
progress_labels.append(progress)
print len(play_features)
data = {}
data['features'] = np.array(play_features)
data['success'] = np.array(success_labels)
data['yards'] = np.array(yard_labels)
data['progress'] = np.array(progress_labels)
data['categorical_features'], data['encoder'] = encode_categorical_features(data['features'], sparse=False)
return data
# Encode categorical features
def encode_categorical_features(features, sparse=True):
encoder = DictVectorizer(sparse=sparse)
encoder.fit(features)
encoded_features = encoder.transform(features)
return encoded_features, encoder
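# Hedged toy example (the play dicts below are made up): the DictVectorizer
# above one-hot encodes string values such as 'side' and passes numeric values
# through, which is how the mixed feature dicts become a numeric matrix.
def _example_encoding():
    toy_plays = [{'side': 'left', 'pass': 1.0, 'togo': 10.0},
                 {'side': 'middle', 'pass': 0.0, 'togo': 3.0}]
    encoded, encoder = encode_categorical_features(toy_plays, sparse=False)
    return encoded, encoder.feature_names_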
def get_team_features(team, features, labels, feature_name='team'):
team_features = []
team_labels = []
for feature_index in range(len(features)):
if features[feature_index][feature_name] == team:
f = features[feature_index].copy()
del f[feature_name]
team_features.append(f)
team_labels.append(labels[feature_index])
print len(team_features), 'features / rows'
return np.array(team_features), np.array(team_labels)
| 16,208 | 48.417683 | 191 | py |
NFLPlayPrediction | NFLPlayPrediction-master/preprocessing/analysis.py | import pickle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA, KernelPCA
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import f_classif
def apply_pca(features, labels, n_components=2):
pca = PCA(n_components = n_components)
pca.fit(features)
reduced_features = pca.transform(features)
print 'PCA variance ratios:', pca.explained_variance_ratio_
print 'PCA sum of variance ratios:', sum(pca.explained_variance_ratio_)
print 'PCA noise variance:', pca.noise_variance_
pickle.dump(reduced_features, open("features_pca1.p", "wb"))
plt.clf()
x = range(1, len(pca.explained_variance_ratio_)+1)
plt.plot(x, pca.explained_variance_ratio_, marker='o')
plt.yscale('log')
plt.ylim([0.00000000000000000000000000000000001, 10])
plt.ylabel('Variance ratio')
plt.xlabel('Component')
plt.title('Component variances')
plt.show()
plt.clf()
#x = range(1, len(pca.explained_variance_ratio) + 1)
x1=[]
y1=[]
x2=[]
y2=[]
for i,t in enumerate(reduced_features):
if labels[i] == 1:
x1.append(t[0])
y1.append(t[1])
else:
x2.append(t[0])
y2.append(t[1])
plt.scatter(x2,y2,marker='.',color='b',alpha=0.66,label='failure')
plt.scatter(x1,y1,marker='.',color='r',alpha=0.33,label='success')
plt.legend(loc=4)
plt.ylabel('Component 2')
plt.xlabel('Component 1')
plt.xlim([-1200,1000])
plt.title('Projection of first two components')
def apply_kernel_pca(features, labels):
plt.clf()
#x = range(1, len(pca.explained_variance_ratio) + 1)
x1=[]
y1=[]
x2=[]
y2=[]
kernel_pca = KernelPCA(n_components=2, kernel='sigmoid')
reduced_features = kernel_pca.fit_transform(features, labels)
for i, t in enumerate(reduced_features):
if labels[i] == 1:
x1.append(t[0])
y1.append(t[1])
else:
x2.append(t[0])
y2.append(t[1])
plt.scatter(x2, y2, marker='.', color='b', alpha=0.66, label='failure')
plt.scatter(x1, y1, marker='.', color='r', alpha=0.33, label='success')
plt.legend(loc=4)
plt.ylabel('Component 2')
plt.xlabel('Component 1')
plt.xlim([-1200, 1000])
plt.title('Projection of first two components')
plt.show()
def apply_anova_f_value_test(features, labels, encoder):
(f_val,_) = f_classif(features, labels)
sort_scores = [i[0] for i in sorted(enumerate(f_val), key=lambda x:x[1], reverse=True)]
for i in sort_scores:
print encoder.feature_names_[i], ':', f_val[i]
def apply_variance_threshold_selection(features, labels, encoder):
sp = VarianceThreshold()
sp.fit(features,labels)
    print sp.variances_
for i in range(len(encoder.feature_names_)):
print encoder.feature_names_[i], ':', sp.variances_[i]
def plot_progress_measure():
x = np.linspace(0, 15, 75)
y1 = []
y2 = []
y3 = []
for i in x:
if i < 10.0:
y3.append(0.0)
y1.append(i/10.0)
y2.append((i/10.0)**2)
else:
y3.append(1.0)
y1.append(1.0)
y2.append(1.0)
plt.clf()
plt.plot(x, y1, label='1st down')
plt.plot(x, y2, label='2nd down')
plt.plot(x, y3, label='3rd/4th down')
plt.ylim([0, 1.1])
plt.xlim([0, 13])
plt.legend(loc=2)
plt.xticks([0, 5, 10], ['0', 'togo/2', 'togo'])
plt.ylabel('Progress score')
plt.xlabel('Distance')
plt.title('Progress label')
plt.show()
# second plot:
y1 = []
y2 = []
y3 = []
for i in x:
if i < 10.0:
y3.append(0.0)
y1.append((i*2)/10.0)
y2.append(i/10.0)
else:
y3.append(1 + float(i - 10.0) / 10.0)
y1.append(1 + float(i - 10.0) / 10.0)
y2.append(1 + float(i - 10.0) / 10.0)
plt.clf()
plt.plot(x, y1, label='1st down')
plt.plot(x, y2, label='2nd down')
plt.plot(x, y3, label='3rd/4th down')
plt.ylim([0, 2.2])
plt.xlim([0, 15])
plt.legend(loc=2)
plt.xticks([0, 5, 10], ['0', 'togo/2', 'togo'])
plt.ylabel('Progress score')
plt.xlabel('Distance')
plt.title('Progress label (Version 1)')
plt.show() | 4,315 | 26.845161 | 91 | py |
NFLPlayPrediction | NFLPlayPrediction-master/preprocessing/__init__.py | from analysis import *
from features import * | 45 | 22 | 22 | py |
dswgan-paper | dswgan-paper-main/exhibits.py | import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import ot
#define paths and set random seed
data_path = "data/"
fig_path = "figures/"
random.seed(100)
################################################################################
## Helper Functions
################################################################################
#load real data and a sample of generated data of equal size
def load_sample(name, path):
real = pd.read_feather(path+"original_data/{}_merged.feather".format(name)).drop(["u74", "u75"], axis=1)
n0 = real[real["t"]==0].shape[0]
n1 = real[real["t"]==1].shape[0]
gen = pd.read_feather(path+"generated/{}_generated.feather".format(name)).drop(["re78_cf"], axis=1)
gen = pd.concat([ gen[gen["t"]==1].sample(n1,replace=False),
gen[gen["t"]==0].sample(n0,replace=False)])
return real, gen
#plot a marginal histogram
def histogram(real_data, gen_data, fname, bins):
plt.figure(figsize=(4,4))
plt.hist([real_data, gen_data], density=1,
histtype='bar', label=["real", "generated"], bins=bins, color=["blue", "red"])
plt.legend(prop={"size": 10})
plt.yticks([])
plt.savefig(fname)
plt.close()
# returns exact wasserstein distance between
# real and generated data and real and multivariate normal
# data that matches the moments of the real data
def wd_distance(real, gen):
n = real.shape[0]
normal = pd.DataFrame(np.random.multivariate_normal(real.mean(), real.cov(), n),
columns=real.columns)
a = np.ones(n)/n
d_gen = ot.emd2(a, a, M=ot.dist(real.to_numpy(),
gen.to_numpy(),
metric='euclidean'),numItermax=2000000)
d_normal = ot.emd2(a, a, M=ot.dist(real.to_numpy(),
normal.to_numpy(),
metric='euclidean'),numItermax=2000000)
return [d_gen, d_normal]
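# Usage note (based on the Table 3 loop further below): wd_distance(real, gen)
# returns [d_gen, d_normal], the exact Wasserstein distances from the real sample
# to the generated sample and to a moment-matched multivariate normal benchmark;
# a ratio d_gen / d_normal below 1 means the generated data is closer to the real
# data than the normal benchmark.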
########################################################################
#Figure 1 (Marginal Histograms)
########################################################################
real, gen = load_sample("cps",data_path)
for var in ["re78","black","hispanic","married","nodegree","re74", "re75", "education", "age"]:
fname = fig_path + "cps_" + var +".pdf"
if var in ["re78", "re74", "re75"]:
bins= 9
else:
bins = 10
histogram(real[var], gen[var], fname, bins)
########################################################################
#Figure 2 (Correlation)
########################################################################
fig4 = plt.figure(figsize=(10,4))
s1 = [fig4.add_subplot(1, 2, i) for i in range(1, 3)]
s1[0].set_xlabel("real")
s1[1].set_xlabel("generated")
s1[0].matshow(real.corr())
s1[1].matshow(gen.corr())
fig4.savefig(fig_path+"cps_corr.pdf")
########################################################################
#Figure 3 (Conditional Histogram)
########################################################################
gen_data1 = gen["re78"][gen["re74"]==0]
real_data1 = real["re78"][real["re74"]==0]
gen_data2 = gen["re78"][gen["re74"]>0]
real_data2 = real["re78"][real["re74"]>0]
bins = 9
histogram(real_data1, gen_data1, fig_path+"cps_c0re74.pdf", bins)
histogram(real_data2, gen_data2, fig_path+"cps_c1re74.pdf", bins)
########################################################################
#Table 1: Summary Statistics for LDW Data
########################################################################
df = pd.read_feather(data_path+"original_data/"+"exp_treated"+".feather").drop(["u74","u75","t"],axis=1)
col_order = ["black", "hispanic", "age", "married", "nodegree", "education", 're74', 're75', 're78']
names = [ "{\tt black}", '{\tt hispanic}', '{\tt age}', '{\tt married}', '{\tt nodegree}', '{\tt education}',
'{\tt earn \'74}', '{\tt earn \'75}', '{\tt earn \'78}']
results = [names]
colnames = ["Variable"]
for file in ["exp_treated","exp_controls","cps_controls","psid_controls"]:
df = pd.read_feather(data_path+"original_data/"+file+".feather").drop(["u74","u75","t"],axis=1)
df = df[col_order]
means = df.mean()
stds = df.std()
means[6:9] = means[6:9]/1000
stds[6:9] = stds[6:9]/1000
results.append(means.round(2))
results.append("(" + stds.round(2).astype(str) +")")
colnames.append(file + " mean")
colnames.append(file + " sd")
df = pd.DataFrame(results).transpose()
df.columns = colnames
print("\n Table 1: ")
print(df.to_latex(index=False, escape=False))
with open('tables/table1.txt','w') as tf:
tf.write(df.to_latex(index=False, escape=False))
########################################################################
#Table 2: Summary Statistics for Generated Data
########################################################################
results = [names]
colnames = ["Variable"]
for name in ["exp","cps","psid"]:
gen = pd.read_feather(data_path + "generated/" + name + "_generated.feather")
gen = gen[col_order+["t"]]
if name== "exp":
groups = [1,0]
else:
groups = [0]
for i in groups:
gen_i = gen[gen["t"]==i].sample(int(1e5), replace=False)
gen_i = gen_i.drop(["t"],axis=1)
means = gen_i.mean()
stds = gen_i.std()
means[6:9] = means[6:9]/1000
stds[6:9] = stds[6:9]/1000
results.append(means.round(2).to_list())
results.append(("(" + stds.round(2).astype(str) +")").to_list())
colnames.append(name + str(i)+ " mean")
colnames.append(name + str(i)+ " sd")
df = pd.DataFrame(results).transpose()
df.columns = colnames
print("\n Table 2: ")
print(df.to_latex(index=False, escape=False))
with open('tables/table2.txt','w') as tf:
tf.write(df.to_latex(index=False, escape=False))
########################################################################
#Table 3: Wasserstein Distance
########################################################################
random.seed(100)
results = []
for name in ["exp", "cps", "psid"]:
gendist = 0
normdist = 0
if name =="cps":
        # fewer repetitions for the larger CPS sample to keep the exact OT computation tractable
K=3
else:
K=10
for j in range(K):
real, gen = load_sample(name, data_path)
wd = wd_distance(real, gen)
gendist += wd[0]
normdist += wd[1]
results.append([name, int(gendist/K), int(normdist/K), gendist/normdist])
df = pd.DataFrame(results, columns=["Dataset","WD, GAN Simulation", "WD, MVN Simulation","Ratio"])
print("\nTable 3: ")
print(df.round(2).to_latex(index=False))
with open('tables/table3.txt','w') as tf:
tf.write(df.round(2).to_latex(index=False))
########################################################################
#Figures 4 and 5: Enforcing Monotonicity
########################################################################
import os
os.system('python3 monotonicity_penalty/monotonicity.py')
| 6,905 | 37.581006 | 109 | py |
dswgan-paper | dswgan-paper-main/gan_estimation/gan_baseline.py | import ldw_gan
import pandas as pd
# first redo with the original data
output_path = "data/generated/"
data_path = "data/original_data/"
#
file = data_path+"exp_merged.feather"
df = pd.read_feather(file).drop(["u74", "u75"], axis=1)
ldw_gan.do_all(df, "exp", batch_size=128, max_epochs=1000, path=output_path)
file = data_path+"cps_merged.feather"
df = pd.read_feather(file).drop(["u74", "u75"], axis=1)
ldw_gan.do_all(df, "cps", batch_size=4096, max_epochs=5000, path=output_path)
file = data_path+"psid_merged.feather"
df = pd.read_feather(file).drop(["u74", "u75"], axis=1)
ldw_gan.do_all(df, "psid", batch_size=512, max_epochs=4000, path=output_path)
| 658 | 33.684211 | 77 | py |
dswgan-paper | dswgan-paper-main/gan_estimation/ldw_gan.py | #wrapper function to save model weights and generate large dataset for
#any Lalonde dataset passed
import wgan
import torch
import pandas as pd
import numpy as np
import ot
from hypergrad import AdamHD
def wd_distance(real, gen):
n = real.shape[0]
a = np.ones(n)/n
d_gen = ot.emd2(a, a, M=ot.dist(real.to_numpy(),
gen.to_numpy(),
metric='euclidean'), numItermax=2000000)
return d_gen
def do_all(df, type, batch_size=128, architecture = [128, 128, 128], lr=1e-4,
max_epochs=4000, optimizer=AdamHD, path=""):
print(type, "starting training")
critic_arch = architecture.copy()
critic_arch.reverse()
# X | t
continuous_vars1 = ["age", "education", "re74", "re75"]
continuous_lower_bounds1 = {"re74": 0, "re75": 0}
categorical_vars1 = ["black", "hispanic", "married", "nodegree"]
context_vars1 = ["t"]
# Y | X, t
continuous_vars2 = ["re78"]
continuous_lower_bounds2 = {"re78": 0}
context_vars2 = ["t", "age", "education", "re74", "re75", "black",
"hispanic", "married", "nodegree"]
df_balanced = df.sample(2*len(df), weights=(1-df.t.mean())*df.t+df.t.mean()*(1-df.t),
replace=True, random_state=0)
#First X|t
data_wrapper1 = wgan.DataWrapper(df_balanced, continuous_vars1, categorical_vars1,
context_vars1, continuous_lower_bounds1)
x1, context1 = data_wrapper1.preprocess(df_balanced)
specifications1 = wgan.Specifications(data_wrapper1, critic_d_hidden=critic_arch, generator_d_hidden=architecture,
batch_size=batch_size, optimizer=optimizer, max_epochs=max_epochs, generator_lr=lr, critic_lr=lr, print_every=1e6)
generator1 = wgan.Generator(specifications1)
critic1 = wgan.Critic(specifications1)
#Then Y|X,t
data_wrapper2 = wgan.DataWrapper(df_balanced, continuous_vars = continuous_vars2,
context_vars= context_vars2, continuous_lower_bounds = continuous_lower_bounds2)
x2, context2 = data_wrapper2.preprocess(df_balanced)
specifications2 = wgan.Specifications(data_wrapper2, critic_d_hidden=critic_arch, generator_lr=lr, critic_lr=lr,
generator_d_hidden=architecture, optimizer=optimizer, batch_size=batch_size,
max_epochs=max_epochs,print_every=1e6)
generator2 = wgan.Generator(specifications2)
critic2 = wgan.Critic(specifications2)
df_real = df.copy()
G=[generator1,generator2]
C=[critic1,critic2]
data_wrappers = [data_wrapper1,data_wrapper2]
wgan.train(generator1, critic1, x1, context1, specifications1)
wgan.train(generator2, critic2, x2, context2, specifications2)
df_fake_x = data_wrappers[0].apply_generator(G[0], df.sample(int(1e5), replace=True))
df_fake = data_wrappers[1].apply_generator(G[1], df_fake_x)
# Let's also add a counterfactual re78 column to our fake data frame
df_fake_x["t"] = 1 - df_fake_x["t"]
df_fake["re78_cf"] = data_wrappers[1].apply_generator(G[1], df_fake_x)["re78"]
tt = (df_fake.re78 - df_fake.re78_cf).to_numpy()[df_fake.t.to_numpy()==1]
print("att =", tt.mean(), "| se =", tt.std()/tt.size**0.5)
# Now, we'll compare our fake data to the real data
table_groupby = ["t"]
scatterplot = dict(x=[],
y=[],
samples = 400)
histogram = dict(variables=['re78', 'black', 'hispanic', 'married', 'nodegree',
're74', 're75', 'education', 'age'],
nrow=3, ncol=3)
compare_path = path + "compare_"+type
wgan.compare_dfs(df_real, df_fake, figsize=5, table_groupby=table_groupby,
histogram=histogram, scatterplot=scatterplot,save=True,
path=compare_path)
df_fake_x = data_wrappers[0].apply_generator(G[0], df.sample(df.shape[0], replace=True))
df_fake = data_wrappers[1].apply_generator(G[1], df_fake_x)
print(df_real.columns)
df_real = df_real.drop("source",axis=1)
wd = wd_distance(df_real, df_fake)
print("wd =", wd)
for model, name in zip(G + C, ["G_0", "G_1", "C_0", "C_1"]):
torch.save(model.state_dict(), path+ name + "_{}.pth".format(type))
n_samples = int(1e6)
df_fake_x = data_wrappers[0].apply_generator(G[0], df_balanced.sample(n_samples, replace=True))
df_fake = data_wrappers[1].apply_generator(G[1], df_fake_x)
df_fake_x["t"] = 1 - df_fake_x["t"]
df_fake["re78_cf"] = data_wrappers[1].apply_generator(G[1], df_fake_x)["re78"]
df_fake.to_feather(path+"{}_generated.feather".format(type))
| 4,765 | 44.826923 | 156 | py |
dswgan-paper | dswgan-paper-main/gan_estimation/gan_robust.py | import ldw_gan
import pandas as pd
import numpy as np
import multiprocessing
from multiprocessing import active_children
from joblib import Parallel, delayed
num_cores = multiprocessing.cpu_count()
print(num_cores)
# first redo with the original data
epochs=5000
batch=4096
output = "data/generated/robustness/"
datapath = "data/original_data/"
df0 = pd.read_feather(datapath+"cps_controls.feather").drop(["u74", "u75"], axis=1)
df1 = pd.read_feather(datapath+"exp_treated.feather").drop(["u74","u75"], axis=1)
#architecture
df = pd.concat([df0,df1])
Parallel(n_jobs=num_cores)(delayed( ldw_gan.do_all)(df, name, architecture=arch,
batch_size=batch, max_epochs=epochs, path=output+"tbl_arch/")
for arch, name in zip([[64, 128, 256], [256, 128, 64]], ["arch1", "arch2"]) )
#Part 1: 80% CV Exercise
def get_sample(df0, df1, pct):
rows0 = int(pct*len(df0))
rows1 = int(pct*len(df1))
controls = df0.sample(rows0, replace=False)
treated = df1.sample(rows1, replace=False)
return pd.concat([controls,treated])
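# get_sample subsamples control and treated units separately, so each robustness
# sample below keeps (approximately) the original treated/control proportions.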
df_cv = [ get_sample(df0, df1, 0.8) for i in range(10) ]
name_cv = ["cps"+str(i) for i in range(10)]
Parallel(n_jobs=num_cores)(delayed(ldw_gan.do_all)(dfs, name, batch_size=4096, max_epochs=epochs, path=output+"tbl_cv/")
for dfs, name in zip(df_cv, name_cv) )
df_size = [ get_sample(df0, df1, pct) for pct in [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1] ]
name_size = ["cps"+ str(pct*100) for pct in [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]]
Parallel(n_jobs=num_cores)(delayed(ldw_gan.do_all)(dfs, name, batch_size=4096,
max_epochs=epochs, path=output+"tbl_size/")
for dfs, name in zip(df_size, name_size))
| 1,834 | 39.777778 | 127 | py |
dswgan-paper | dswgan-paper-main/data/original_data/merge_data.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 15 10:20:16 2019
@author: jonas
"""
import pandas as pd
exp = pd.read_feather("exp_merged.feather")
cps = pd.read_feather("cps_controls.feather")
psid = pd.read_feather("psid_controls.feather")
cps = pd.concat((exp.loc[exp.t==1], cps), ignore_index=True)
psid = pd.concat((exp.loc[exp.t==1], psid), ignore_index=True)
cps.to_feather("cps_merged.feather")
psid.to_feather("psid_merged.feather") | 468 | 23.684211 | 62 | py |
dswgan-paper | dswgan-paper-main/monotonicity_penalty/monotonicity.py | import wgan
import pandas as pd
import torch
import numpy as np
import torch.nn.functional as F
from matplotlib import pyplot as plt
########################################
# setup
########################################
df = pd.read_feather("data/original_data/cps_merged.feather").drop("u75",1).drop("u74",1)
df = df.loc[df.t==0,]
continuous_vars_0 = ["age", "education", "re74", "re75", "re78"]
continuous_lower_bounds_0 = {"re74": 0, "re75": 0, "re78": 0, "age": 0}
categorical_vars_0 = ["black", "hispanic", "married", "nodegree"]
context_vars_0 = ["t"]
dw = wgan.DataWrapper(df, continuous_vars_0, categorical_vars_0, context_vars_0, continuous_lower_bounds_0)
x, context = dw.preprocess(df)
a = lambda *args, **kwargs: torch.optim.Adam(betas=(0, 0.9), *args, **kwargs)
oa = lambda *args, **kwargs: wgan.OAdam(betas=(0, 0.9), *args, **kwargs)
spec = wgan.Specifications(dw, batch_size=512, max_epochs=int(3e3), print_every=500, optimizer=a, generator_optimizer=oa, critic_lr=1e-4, generator_lr=1e-4)
########################################
# define penalties
########################################
def monotonicity_penalty_kernreg(factor, h=0.1, idx_out=4, idx_in=0, x_min=None, x_max=None, data_wrapper=None):
"""
Adds Kernel Regression monotonicity penalty.
Incentivizes monotonicity of the mean of cat(x_hat, context)[:, dim_out] conditional on cat(x_hat, context)[:, dim_in].
Parameters
----------
x_hat: torch.tensor
generated data
context: torch.tensor
context data
Returns
-------
torch.tensor
"""
if data_wrapper is not None:
x_std = torch.cat(data_wrapper.stds, -1).squeeze()[idx_in]
x_mean = torch.cat(data_wrapper.means, -1).squeeze()[idx_in]
x_min, x_max = ((x-x_mean)/(x_std+1e-3) for x in (x_min, x_max))
if x_min is None: x_min = x.min()
if x_max is None: x_max = x.max()
def penalty(x_hat, context):
y, x = (torch.cat([x_hat, context], -1)[:, idx] for idx in (idx_out, idx_in))
k = lambda x: (1-x.pow(2)).clamp_min(0)
x_grid = ((x_max-x_min)*torch.arange(20, device=x.device)/20 + x_min).detach()
W = k((x_grid.unsqueeze(-1) - x)/h).detach()
W = W/(W.sum(-1, True) + 1e-2)
y_mean = (W*y).sum(-1).squeeze()
return (factor * (y_mean[:-1]-y_mean[1:])).clamp_min(0).sum()
return penalty
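# Usage sketch (mirrors the training branch below): the returned closure is passed as the
# extra penalty argument to wgan.train, e.g.
#   wgan.train(gen, crit, x, context, spec,
#              monotonicity_penalty_kernreg(1, h=1, idx_in=0, idx_out=4,
#                                           x_min=0, x_max=90, data_wrapper=dw))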
def monotonicity_penalty_chetverikov(factor, bound=0, idx_out=4, idx_in=0):
"""
Adds Chetverikov monotonicity test penalty.
Incentivizes monotonicity of the mean of cat(x_hat, context)[:, dim_out] conditional on cat(x_hat, context)[:, dim_in].
Parameters
----------
x_hat: torch.tensor
generated data
context: torch.tensor
context data
Returns
-------
torch.tensor
"""
def penalty(x_hat, context):
y, x = (torch.cat([x_hat, context], -1)[:, idx] for idx in (idx_out, idx_in))
argsort = torch.argsort(x)
y, x = y[argsort], x[argsort]
sigma = (y[:-1] - y[1:]).pow(2)
sigma = torch.cat([sigma, sigma[-1:]])
k = lambda x: 0.75*F.relu(1-x.pow(2))
h_max = torch.tensor((x.max()-x.min()).detach()/2).to(x_hat.device)
n = y.size(0)
h_min = 0.4*h_max*(np.log(n)/n)**(1/3)
l_max = int((h_min/h_max).log()/np.log(0.5))
H = h_max * (torch.tensor([0.5])**torch.arange(l_max)).to(x_hat.device)
x_dist = (x.unsqueeze(-1) - x) # i, j
Q = k(x_dist.unsqueeze(-1) / H) # i, j, h
Q = (Q.unsqueeze(0) * Q.unsqueeze(1)).detach() # i, j, x, h
y_dist = (y - y.unsqueeze(-1)) # i, j
sgn = torch.sign(x_dist) * (x_dist.abs() > 1e-8) # i, j
b = ((y_dist * sgn).unsqueeze(-1).unsqueeze(-1) * Q).sum(0).sum(0) # x, h
V = ((sgn.unsqueeze(-1).unsqueeze(-1) * Q).sum(1).pow(2)* sigma.unsqueeze(-1).unsqueeze(-1)).sum(0) # x, h
T = b / (V + 1e-2)
return T.max().clamp_min(0) * factor
return penalty
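# Usage sketch (as in the training branch below):
#   wgan.train(gen, crit, x, context, spec, monotonicity_penalty_chetverikov(1, idx_in=0, idx_out=4))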
mode = "load"
if mode == "train":
########################################
# train and save models
########################################
gennone, critnone = wgan.Generator(spec), wgan.Critic(spec)
wgan.train(gennone, critnone, x, context, spec)
    torch.save(gennone, "monotonicity_penalty/gennone.torch")
    torch.save(critnone, "monotonicity_penalty/critnone.torch")
genkern, critkern = wgan.Generator(spec), wgan.Critic(spec)
wgan.train(genkern, critkern, x, context, spec, monotonicity_penalty_kernreg(1, h=1, idx_in=0, idx_out=4, x_min=0, x_max=90, data_wrapper=dw))
torch.save(genkern, "monotonicity_penalty/genkern.torch")
torch.save(critkern, "monotonicity_penalty/critkern.torch")
genchet, critchet = wgan.Generator(spec), wgan.Critic(spec)
wgan.train(genchet, critchet, x, context, spec, monotonicity_penalty_chetverikov(1, idx_in=0, idx_out=4))
    torch.save(genchet, "monotonicity_penalty/genchet.torch")
    torch.save(critchet, "monotonicity_penalty/critchet.torch")
elif mode == "load":
########################################
# load models
########################################
genchet = torch.load("monotonicity_penalty/genchet.torch", map_location=torch.device('cpu'))
critchet = torch.load("monotonicity_penalty/critchet.torch", map_location=torch.device('cpu'))
genkern = torch.load("monotonicity_penalty/genkern.torch", map_location=torch.device('cpu'))
critkern = torch.load("monotonicity_penalty/critkern.torch", map_location=torch.device('cpu'))
gennone = torch.load("monotonicity_penalty/gennone.torch", map_location=torch.device('cpu'))
critnone = torch.load("monotonicity_penalty/critnone.torch", map_location=torch.device('cpu'))
########################################
# produce figures
########################################
# sample data
df_none = dw.apply_generator(gennone, df.sample(int(5e5), replace=True)).reset_index(drop=True)
df_kern = dw.apply_generator(genkern, df.sample(int(5e5), replace=True)).reset_index(drop=True)
df_chet = dw.apply_generator(genchet, df.sample(int(5e5), replace=True)).reset_index(drop=True)
# Kernel Smoother for plotting
def y_smooth(x, y, h):
x, y = torch.tensor(x), torch.tensor(y)
k = lambda x: (1-x.pow(2)).clamp_min(0)
x_grid = (x.max()-x.min())*torch.arange(20)/20 + x.min()
W = k((x_grid.unsqueeze(-1) - x)/h)
W = W/W.sum(-1, True)
return x_grid, (W*y).sum(-1)
# Compare conditional means
plt.figure(figsize=(10, 6))
for df_, lab in zip((df, df_none, df_kern, df_chet), ("Original Data", "Unpenalized WGAN", "Kernel Regression Penalty", "Chetverikov Penalty")):
x_, y = df_.age.to_numpy(), df_.re78.to_numpy()
x_grid, y_hat = y_smooth(x_, y, 1)
plt.plot(x_grid, y_hat, label=lab)
plt.ylabel("Earnings 1978")
plt.xlabel("Age")
plt.legend()
plt.savefig("figures/monotonicity.pdf", format="pdf")
# Compare overall fits
f, a = plt.subplots(4, 6, figsize=(15, 10), sharex="col", sharey="col")
for i, (ax, df_, n) in enumerate(zip(a, [df, df_none, df_kern, df_chet], ["Original", "Unpenalized WGAN", "Kernel Regression Penalty", "Chetverikov Penalty"])):
ax[0].set_ylabel(n)
ax[0].matshow(df_.drop(["t"], 1).corr())
ax[1].hist(df_.re78, density=True)
ax[2].hist(df_.age, density=True)
ax[3].hist(df_.re74, density=True)
ax[4].hist(df_.education, density=True)
ax[5].hist(df_.married, density=True)
for _ in range(1,6): ax[_].set_yticklabels([])
for i, n in enumerate(["Correlation", "Earnings 1978", "Age", "Earnings 1974", "Education", "Married"]):
a[0, i].set_title(n)
plt.savefig("figures/monotonicity_fit.pdf", format="pdf")
| 7,429 | 41.701149 | 160 | py |
HC-MGAN | HC-MGAN-main/fmnist.py | import argparse
import os
import sys
from utils.data import create_dataloader, merge_dataloaders
from tree.tree import Node, grow_tree_from_root
import torch
parser = argparse.ArgumentParser()
#main config
parser.add_argument('--dataset_path', type=str, default='data',
metavar='', help='Path for folder containing the dataset root folder')
parser.add_argument('--logs_path', type=str, default='experiment_logs_fmnist',
metavar='', help='Folder for saving all logs (replaces previous logs in the folder if any)')
parser.add_argument('--root_node_name', type=str, default='Z',
metavar='', help='Name for the root node of the tree')
parser.add_argument('--device', type=int, default=0,
metavar='', help='GPU device to be used')
parser.add_argument('--amp_enable', action='store_true', help='Enables automatic mixed precision if available (executes faster on modern GPUs)')
parser.set_defaults(amp_enable=False)
#architecture/model parameters
parser.add_argument('--nf_g', type=int, default=128,
metavar='', help='Number of feature maps for generator.')
parser.add_argument('--nf_d', type=int, default=128,
metavar='', help='Number of feature maps for discriminator/classifier.')
parser.add_argument('--kernel_size_g', type=int, default=4,
metavar='', help='Size of kernel for generators')
parser.add_argument('--kernel_size_d', type=int, default=5,
metavar='', help='Size of kernel for discriminator/classifier')
parser.add_argument('--normalization_d', type=str, default='layer_norm',
metavar='', help='Type of normalization layer used for discriminator/classifier')
parser.add_argument('--normalization_g', type=str, default='no_norm',
metavar='', help='Type of normalization layer used for generator')
parser.add_argument('--architecture_d', type=str, default='cnn',
                    metavar='', help='Specific architecture choice for discriminator/classifier')
parser.add_argument('--architecture_g', type=str, default='cnn',
                    metavar='', help='Specific architecture choice for generator')
parser.add_argument('--img_channels', type=int, default=1,
metavar='', help='Number of channels used for intended types of images')
parser.add_argument('--latent_dim', type=int, default=100,
metavar='', help="Dimension of generator's latent space")
parser.add_argument('--batch_size_real', type=int, default=100,
metavar='', help="Minibatch size for real images")
parser.add_argument('--batch_size_gen', type=int, default=100,
metavar='', help="Minibatch size for generated images ")
parser.add_argument('--img_dim', type=int, default=28,
metavar='', help="Image dimensions")
parser.add_argument('--shared_features_across_ref', action='store_true', help='Shares encoder features among parallel refinement groups (disabled by default)')
parser.set_defaults(shared_features_across_ref=False)
#training parameters
parser.add_argument('--lr_d', type=float, default=0.0001,
metavar='', help='Learning rate for discriminator')
parser.add_argument('--lr_c', type=float, default=0.00002,
metavar='', help='Learning rate for classifier')
parser.add_argument('--lr_g', type=float, default=0.0002,
metavar='', help='Learning rate for generator')
parser.add_argument('--b1', type=float, default=0.5,
metavar='', help='Adam optimizer beta 1 parameter')
parser.add_argument('--b2', type=float, default=0.999,
metavar='', help='Adam optimizer beta 2 parameter')
parser.add_argument('--noise_start', type=float, default=1.5,
metavar='', help='Start image noise intensity linearly decaying throughout each GAN/MGAN training')
parser.add_argument('--epochs_raw_split', type=int, default=150,
metavar='', help='Number of epochs for raw split training')
parser.add_argument('--epochs_refinement', type=int, default=150,
metavar='', help='Number of epochs for refinement training')
parser.add_argument('--diversity_parameter_g', type=float, default=1.0,
metavar='', help="Hyperparameter for weighting generators' classification loss component")
parser.add_argument('--no_refinements', type=int, default=8,
metavar='', help='Number of refinements in each split')
parser.add_argument('--no_splits', type=int, default=9,
metavar='', help='Number of splits during tree growth')
parser.add_argument('--collapse_check_epoch', type=float, default=40,
metavar='', help='Epoch after which to check for generation collapse')
parser.add_argument('--sample_interval', type=int, default=10,
                    metavar='', help='No. of epochs between printing/saving training logs')
parser.add_argument('--min_prob_mass_variation', type=float, default=150,
                    metavar='', help='If the total probability mass variation between two consecutive refinements is below this value, the next refinements for that node are skipped to save time')
args = parser.parse_args()
torch.cuda.set_device(args.device)
dataloader_train = create_dataloader(dataset='fmnist', test=False, batch_size=args.batch_size_real, path=args.dataset_path)
dataloader_test = create_dataloader(dataset='fmnist', test=True, batch_size=args.batch_size_real, path=args.dataset_path)
dataloader_train = merge_dataloaders(dataloader_train, dataloader_test)
root_node = Node(args.root_node_name, dataloader_train.sampler.weights, args.logs_path)
grow_tree_from_root(root_node, dataloader_train, args)
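# Example invocation (assumption -- any flag defined above can be overridden):
#   python fmnist.py --dataset_path data --device 0 --amp_enable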
| 5,985 | 64.065217 | 202 | py |
HC-MGAN | HC-MGAN-main/sop.py | import argparse
import os
import sys
from utils.data import create_dataloader, merge_dataloaders
from tree.tree import Node, grow_tree_from_root
import torch
parser = argparse.ArgumentParser()
#main config
parser.add_argument('--dataset_path', type=str, default='data',
metavar='', help='Path for folder containing the dataset root folder')
parser.add_argument('--logs_path', type=str, default='experiment_logs_sop',
metavar='', help='Folder for saving all logs (replaces previous logs in the folder if any)')
parser.add_argument('--root_node_name', type=str, default='Z',
metavar='', help='Name for the root node of the tree')
parser.add_argument('--device', type=int, default=0,
metavar='', help='GPU device to be used')
parser.add_argument('--amp_enable', action='store_true', help='Enables automatic mixed precision if available (executes faster on modern GPUs)')
parser.set_defaults(amp_enable=False)
#architecture/model parameters
parser.add_argument('--nf_g', type=int, default=128,
metavar='', help='Number of feature maps for generator.')
parser.add_argument('--nf_d', type=int, default=128,
metavar='', help='Number of feature maps for discriminator/classifier.')
parser.add_argument('--kernel_size_g', type=int, default=4,
metavar='', help='Size of kernel for generators')
parser.add_argument('--kernel_size_d', type=int, default=5,
metavar='', help='Size of kernel for discriminator/classifier')
parser.add_argument('--normalization_d', type=str, default='layer_norm',
metavar='', help='Type of normalization layer used for discriminator/classifier')
parser.add_argument('--normalization_g', type=str, default='no_norm',
metavar='', help='Type of normalization layer used for generator')
parser.add_argument('--architecture_d', type=str, default='cnn',
                    metavar='', help='Specific architecture choice for discriminator/classifier')
parser.add_argument('--architecture_g', type=str, default='cnn',
                    metavar='', help='Specific architecture choice for generator')
parser.add_argument('--img_channels', type=int, default=1,
metavar='', help='Number of channels used for intended types of images')
parser.add_argument('--latent_dim', type=int, default=100,
metavar='', help="Dimension of generator's latent space")
parser.add_argument('--batch_size_real', type=int, default=100,
metavar='', help="Minibatch size for real images")
parser.add_argument('--batch_size_gen', type=int, default=100,
metavar='', help="Minibatch size for generated images ")
parser.add_argument('--img_dim', type=int, default=32,
metavar='', help="Image dimensions")
parser.add_argument('--shared_features_across_ref', action='store_true', help='Shares encoder features among parallel refinement groups (activated by default)')
parser.add_argument('--no-shared_features_across_ref', dest='shared_features_across_ref', action='store_false', help='Does not share encoder features among parallel refinement groups')
parser.set_defaults(shared_features_across_ref=True)
#training parameters
parser.add_argument('--lr_d', type=float, default=0.0002,
metavar='', help='Learning rate for discriminator')
parser.add_argument('--lr_c', type=float, default=0.00002,
metavar='', help='Learning rate for classifier')
parser.add_argument('--lr_g', type=float, default=0.0002,
metavar='', help='Learning rate for generator')
parser.add_argument('--b1', type=float, default=0.5,
metavar='', help='Adam optimizer beta 1 parameter')
parser.add_argument('--b2', type=float, default=0.999,
metavar='', help='Adam optimizer beta 2 parameter')
parser.add_argument('--noise_start', type=float, default=1.0,
metavar='', help='Start image noise intensity linearly decaying throughout each GAN/MGAN training')
parser.add_argument('--epochs_raw_split', type=int, default=100,
metavar='', help='Number of epochs for raw split training')
parser.add_argument('--epochs_refinement', type=int, default=100,
metavar='', help='Number of epochs for refinement training')
parser.add_argument('--diversity_parameter_g', type=float, default=0.1,
metavar='', help="Hyperparameter for weighting generators' classification loss component")
parser.add_argument('--no_refinements', type=int, default=4,
metavar='', help='Number of refinements in each split')
parser.add_argument('--no_splits', type=int, default=9,
metavar='', help='Number of splits during tree growth')
parser.add_argument('--collapse_check_epoch', type=float, default=40,
metavar='', help='Epoch after which to check for generation collapse')
parser.add_argument('--sample_interval', type=int, default=10,
                    metavar='', help='No. of epochs between printing/saving training logs')
parser.add_argument('--min_prob_mass_variation', type=float, default=150,
                    metavar='', help='If the total probability mass variation between two consecutive refinements is below this value, the next refinements for that node are skipped to save time')
args = parser.parse_args()
torch.cuda.set_device(args.device)
dataloader_train = create_dataloader(dataset='sop', test=False, batch_size=args.batch_size_real, path=args.dataset_path)
root_node = Node(args.root_node_name, dataloader_train.sampler.weights, args.logs_path)
grow_tree_from_root(root_node, dataloader_train, args)
| 5,968 | 63.880435 | 202 | py |
HC-MGAN | HC-MGAN-main/mnist.py | import argparse
import os
import sys
from utils.data import create_dataloader, merge_dataloaders
from tree.tree import Node, grow_tree_from_root
import torch
parser = argparse.ArgumentParser()
#main config
parser.add_argument('--dataset_path', type=str, default='data',
metavar='', help='Path for folder containing the dataset root folder')
parser.add_argument('--logs_path', type=str, default='experiment_logs_mnist',
metavar='', help='Folder for saving all logs (replaces previous logs in the folder if any)')
parser.add_argument('--root_node_name', type=str, default='Z',
metavar='', help='Name for the root node of the tree')
parser.add_argument('--device', type=int, default=0,
metavar='', help='GPU device to be used')
parser.add_argument('--amp_enable', action='store_true', help='Enables automatic mixed precision if available (executes faster on modern GPUs)')
parser.set_defaults(amp_enable=False)
#architecture/model parameters
parser.add_argument('--nf_g', type=int, default=128,
metavar='', help='Number of feature maps for generator.')
parser.add_argument('--nf_d', type=int, default=128,
metavar='', help='Number of feature maps for discriminator/classifier.')
parser.add_argument('--kernel_size_g', type=int, default=4,
metavar='', help='Size of kernel for generators')
parser.add_argument('--kernel_size_d', type=int, default=5,
metavar='', help='Size of kernel for discriminator/classifier')
parser.add_argument('--normalization_d', type=str, default='layer_norm',
metavar='', help='Type of normalization layer used for discriminator/classifier')
parser.add_argument('--normalization_g', type=str, default='no_norm',
metavar='', help='Type of normalization layer used for generator')
parser.add_argument('--architecture_d', type=str, default='cnn',
                    metavar='', help='Specific architecture choice for discriminator/classifier')
parser.add_argument('--architecture_g', type=str, default='cnn',
                    metavar='', help='Specific architecture choice for generator')
parser.add_argument('--img_channels', type=int, default=1,
metavar='', help='Number of channels used for intended types of images')
parser.add_argument('--latent_dim', type=int, default=100,
metavar='', help="Dimension of generator's latent space")
parser.add_argument('--batch_size_real', type=int, default=100,
metavar='', help="Minibatch size for real images")
parser.add_argument('--batch_size_gen', type=int, default=100,
metavar='', help="Minibatch size for generated images ")
parser.add_argument('--img_dim', type=int, default=28,
metavar='', help="Image dimensions")
parser.add_argument('--shared_features_across_ref', action='store_true', help='Shares encoder features among parallel refinement groups (disabled by default)')
parser.set_defaults(shared_features_across_ref=False)
#training parameters
parser.add_argument('--lr_d', type=float, default=0.0001,
metavar='', help='Learning rate for discriminator')
parser.add_argument('--lr_c', type=float, default=0.00002,
metavar='', help='Learning rate for classifier')
parser.add_argument('--lr_g', type=float, default=0.0002,
metavar='', help='Learning rate for generator')
parser.add_argument('--b1', type=float, default=0.5,
metavar='', help='Adam optimizer beta 1 parameter')
parser.add_argument('--b2', type=float, default=0.999,
metavar='', help='Adam optimizer beta 2 parameter')
parser.add_argument('--noise_start', type=float, default=1.0,
metavar='', help='Start image noise intensity linearly decaying throughout each GAN/MGAN training')
parser.add_argument('--epochs_raw_split', type=int, default=100,
metavar='', help='Number of epochs for raw split training')
parser.add_argument('--epochs_refinement', type=int, default=100,
metavar='', help='Number of epochs for refinement training')
parser.add_argument('--diversity_parameter_g', type=float, default=1.0,
metavar='', help="Hyperparameter for weighting generators' classification loss component")
parser.add_argument('--no_refinements', type=int, default=6,
metavar='', help='Number of refinements in each split')
parser.add_argument('--no_splits', type=int, default=9,
metavar='', help='Number of splits during tree growth')
parser.add_argument('--collapse_check_epoch', type=float, default=40,
metavar='', help='Epoch after which to check for generation collapse')
parser.add_argument('--sample_interval', type=int, default=10,
metavar='', help='No. of epochs between printring/saving training logs')
parser.add_argument('--min_prob_mass_variation', type=float, default=150,
metavar='', help='If the total prob mass variation between two consecutive refinements is less than this number, to save up time, the next refinements are skipped for that node')
args = parser.parse_args()
torch.cuda.set_device(args.device)
dataloader_train = create_dataloader(dataset='mnist', test=False, batch_size=args.batch_size_real, path=args.dataset_path)
dataloader_test = create_dataloader(dataset='mnist', test=True, batch_size=args.batch_size_real, path=args.dataset_path)
dataloader_train = merge_dataloaders(dataloader_train, dataloader_test)
root_node = Node(args.root_node_name, dataloader_train.sampler.weights, args.logs_path)
grow_tree_from_root(root_node, dataloader_train, args)
| 5,984 | 63.354839 | 202 | py |
HC-MGAN | HC-MGAN-main/models/models_32x32.py | import argparse
import os
from torch.autograd import Variable
import torch.nn as nn
import torch
from models.utils import verify_string_args, linear_block, Reshape, convT_block, conv_block
class Generator(nn.Module):
def __init__(self,
architecture = 'cnn',
nf=128,
kernel_size=4,
latent_dim = 100,
nc = 3,
print_shapes=False,
norm = 'no_norm'
):
super(Generator, self).__init__()
print_shapes = False
architecture_list = ['cnn', 'cnn_short', 'cnn_long']
normalization_list = ['no_norm']
verify_string_args(architecture, architecture_list)
verify_string_args(norm, normalization_list)
self.img_size = 32
self.architecture = architecture
self.nf = nf
self.kernel_size = kernel_size
self.latent_dim = latent_dim
self.nc = nc
self.norm = norm
#print('Generator normalization is ', self.norm)
gen_layers = []
if architecture == 'cnn' or architecture == 'cnn_short':
first_map_shape = 8
gen_layers += linear_block(self.latent_dim, nf*2*first_map_shape*first_map_shape, norm='no_norm', act=nn.ReLU(True))
gen_layers += Reshape(-1, nf*2, first_map_shape, first_map_shape),
gen_layers += convT_block(nf*2, nf, stride=2, padding=1, norm=self.norm, act=nn.ReLU(True))
gen_layers += convT_block(nf, nc, stride=2, padding=1, norm='no_norm', act=nn.Tanh())
elif (architecture == 'cnn_long'):
first_map_shape = 3
gen_layers += linear_block(self.latent_dim, nf*4*first_map_shape*first_map_shape, norm='no_norm', act=nn.ReLU(True))
gen_layers += Reshape(-1, nf*4, first_map_shape, first_map_shape),
gen_layers += convT_block(nf*4, nf*2, stride=2, padding=1, norm=self.norm, act=nn.ReLU(True))
gen_layers += convT_block(nf*2, nf, stride=2, padding=0, norm=self.norm, act=nn.ReLU(True))
gen_layers += convT_block(nf, nc, stride=2, padding=1, norm='no_norm', act=nn.Tanh())
else:
raise ValueError('Architecture {} not implemented!'.format(architecture))
self.generate = nn.Sequential(*gen_layers)
if print_shapes:
input_tensor = torch.zeros(100,self.latent_dim)
output = input_tensor
print("\nGenerator ConvT Shapes:\n")
for i, ly in enumerate(self.generate):
output = self.generate[i](output)
if (type(ly) == torch.nn.modules.conv.ConvTranspose2d):
print('layer: {}'.format(i))
print(ly)
print('output shape: {}'.format(output.shape))
def forward(self, z):
img = self.generate(z)
if self.architecture == 'mlp':
img = img.view(-1,self.nc, self.img_size, self.img_size)
return img
class EncoderLayers(nn.Module):
def __init__(self,
architecture='cnn',
nf=128,
kernel_size=5,
norm = 'no_norm',
nc = 3,
print_shapes=True
):
super(EncoderLayers, self).__init__()
print_shapes = False
architecture_list = ['cnn', 'cnn_short', 'cnn_long']
normalization_list = ['layer_norm', 'spectral_norm', 'no_norm']
verify_string_args(architecture, architecture_list)
verify_string_args(norm, normalization_list)
self.img_size = 32
self.architecture = architecture
self.nf = nf
self.kernel_size = kernel_size
self.norm = norm
self.nc = nc
self.leaky_relu = nn.LeakyReLU(0.2, inplace=True)
#print('Normalization for conv layers is {}'.format(norm))
encoder_layers = []
if (architecture == 'cnn' or architecture == 'cnn_short'):
encoder_layers += conv_block(nc, nf, fmap_shape=[16, 16], norm=self.norm, act=self.leaky_relu, kernel_size=self.kernel_size)
encoder_layers += conv_block(nf, nf * 2, fmap_shape=[8, 8], norm=self.norm, act=self.leaky_relu, kernel_size=self.kernel_size)
encoder_layers += conv_block(nf * 2, nf * 4, fmap_shape=[4,4], norm=self.norm, act=self.leaky_relu, kernel_size=self.kernel_size)
else:
            raise ValueError('Architecture {} not implemented!'.format(architecture))
self.encoder_layers = nn.Sequential(*encoder_layers)
if print_shapes:
print("\nConv Features Shapes\n")
input_tensor = torch.zeros(100, self.nc, self.img_size, self.img_size)
output=input_tensor
if architecture == 'mlp':
output = input_tensor.view(100,-1)
for i, ly in enumerate(self.encoder_layers):
output = self.encoder_layers[i](output)
if (type(ly) == torch.nn.modules.conv.Conv2d and print_shapes):
print('layer: {}'.format(i))
print(ly)
print('output shape: {}'.format(output.shape))
self.total_units = output.view(input_tensor.shape[0], -1).shape[-1]
def forward(self, img):
img_input_dim = img.shape[-1]
if img_input_dim!=self.img_size:
raise Exception("This discriminator/classifier assumes image inputs with {} resolution and an input with {} resolution was received. Please choose a compatible model or data.".format(self.img_size, img_input_dim))
if self.architecture == 'mlp':
img = img.view(img.shape[0],-1)
return self.encoder_layers(img)
| 5,899 | 39.972222 | 225 | py |
HC-MGAN | HC-MGAN-main/models/models_general.py | import torch.nn as nn
import torch.nn.functional as F
import torch
class GeneratorSet(nn.Module):
def __init__(self, *gens):
super(GeneratorSet, self).__init__()
modules = nn.ModuleList()
for gen in gens:
modules.append(gen)
self.paths = modules
def forward(self, z, rand_perm=False):
img = []
for path in self.paths:
img.append(path(z))
img = torch.cat(img, dim=0)
if rand_perm:
img = img[torch.randperm(img.shape[0])]
return img
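# Note: GeneratorSet concatenates the outputs of all generators along the batch
# dimension, so with two generators and z of shape (B, latent_dim) the output has
# shape (2*B, C, H, W); models/gan.py relies on this ordering when it labels the
# first half of a generated batch as class 0 and the second half as class 1.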
class Classifier(nn.Module):
def __init__(self,
feature_layers,
no_c_outputs = 2,
dropout = 0
):
super(Classifier, self).__init__()
self.feature_layers = feature_layers
self.no_c_outputs = no_c_outputs
total_units = feature_layers.total_units
self.linear_clasf = nn.Linear(total_units, no_c_outputs)
self.dropout = nn.Dropout(dropout)
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, input_tensor, feature_input = False):
if feature_input:
conv_features = input_tensor
else:
conv_features = (self.feature_layers(input_tensor))
conv_features = conv_features.view(conv_features.shape[0], -1)
classification = self.dropout(conv_features)
classification = self.linear_clasf(classification)
classification = self.log_softmax(classification)
return classification
class Discriminator(nn.Module):
def __init__(self,
feature_layers
):
super(Discriminator, self).__init__()
self.feature_layers = feature_layers
total_units = feature_layers.total_units
self.linear_disc = nn.Linear(total_units, 1)
def forward(self, input_tensor, feature_input = False):
if feature_input:
conv_features = input_tensor
else:
conv_features = (self.feature_layers(input_tensor))
conv_features = conv_features.view(conv_features.shape[0], -1)
validity = self.linear_disc(conv_features)
return validity
| 2,298 | 29.25 | 70 | py |
HC-MGAN | HC-MGAN-main/models/utils.py | from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
def get_seq_model_shapes(seq_model, input_shape, seq_model_name = 'seq_model'):
input_tensor = torch.zeros(*input_shape)
output = input_tensor
print("\n{} Layers:\n".format(seq_model_name))
for i, ly in enumerate(seq_model):
output = seq_model[i](output)
print('Layer Block {}: {}, out shape: {}'.format(i, ly, output.shape))
return output
def verify_string_args(string_arg, string_args_list):
if string_arg not in string_args_list:
raise ValueError("Argument '{}' not available in {}".format(string_arg, string_args_list))
class Reshape(nn.Module):
def __init__(self, *args):
super(Reshape, self).__init__()
self.shape = args
def forward(self, x):
return x.view(self.shape)
def convT_block(nf_in, nf_out, stride = 2, padding = 1,norm='no_norm', act=None, kernel_size=4):
block = [nn.ConvTranspose2d(nf_in, nf_out, kernel_size=kernel_size, stride=stride, padding=padding, bias=True )]
if act is not None:
block.append(act)
return block
def conv_block(nf_in, nf_out, stride = 2, padding = 2, fmap_shape=[10,10], norm=None, act=None, kernel_size=5):
block = [nn.Conv2d(nf_in, nf_out, kernel_size=kernel_size, stride=stride, padding=padding, bias=True )]
if norm == 'layer_norm':
block.append(nn.LayerNorm([nf_out]+fmap_shape))
elif norm == 'spectral_norm':
block[-1] = torch.nn.utils.spectral_norm(block[-1])
if act is not None:
block.append(act)
#block.append(nn.LeakyReLU(0.2, inplace=True))
#block.append(GaussianNoise(normal_std_scale=0.7))
return block
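# Note: with norm='layer_norm', fmap_shape must match the spatial size of the conv
# output (e.g. [14, 14] after the first stride-2 conv on 28x28 inputs), which is why
# the encoder models pass an explicit fmap_shape for every conv_block call.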
def linear_block(nf_in, nf_out, norm='no_norm', act=None):
block = [nn.Linear(nf_in, nf_out)]
if norm == 'layer_norm':
block.append(nn.LayerNorm([nf_out]))
elif norm == 'spectral_norm':
block[-1] = torch.nn.utils.spectral_norm(block[-1])
if act is not None:
block.append(act)
return block
| 2,061 | 33.949153 | 116 | py |
HC-MGAN | HC-MGAN-main/models/gan.py | #torch imports
from torch.autograd import Variable
import torch
import numpy as np
class GAN:
def __init__(self,
gen_set,
disc,
clasf,
feature_layers,
optimizer_G,
optimizer_D,
optimizer_C,
diversity_parameter_g
):
'''Class for coordinating batch-wise update between the components of MGAN (raw split) or a GAN group (refinement)
gen_set (torch.nn.Module): generator(s)
disc (torch.nn.Module): discriminator
clasf (torch.nn.Module): classifier
feature_layers (torch.nn.Module): shared feature extractor for classifier and discriminator
optimizer_G (torch.optim.Adam): Adam optimizer for generator(s)
optimizer_D (torch.optim.Adam): Adam optimizer for discriminator
optimizer_C (torch.optim.Adam): Adam optimizer for classifier
diversity_parameter_g (float): hyperparameter for weighting generators' classification loss component
'''
#components
self.gen_set = gen_set
self.disc = disc
self.clasf = clasf
self.feature_layers = feature_layers
self.latent_dim = gen_set.paths[0].latent_dim
self.diversity_parameter_g = diversity_parameter_g
#optimizers
self.optimizer_G = optimizer_G
self.optimizer_D = optimizer_D
self.optimizer_C = optimizer_C
#losses
self.loss_disc = torch.nn.BCEWithLogitsLoss()
self.loss_clasf = torch.nn.NLLLoss()
self.amp_enable = False
self.metrics_dict = {'loss_disc_real': 0,
'acc_disc_real' : 0,
'loss_disc_fake': 0,
'acc_disc_fake': 0,
'loss_gen_disc': 0,
'loss_gen_clasf': 0,
'loss_clasf': 0,
'acc_clasf' : 0,
}
self.Tensor = torch.cuda.FloatTensor
def bin_accuracy(self, pred, labels):
corrects = (labels == torch.sigmoid(pred).round()).detach()
acc = corrects.sum()/len(corrects)
return acc
def categorical_accuracy(self, pred, labels):
corrects = (labels == torch.argmax(pred, dim = -1)).detach()
acc = corrects.sum()/len(corrects)
return acc
def assign_amp(self, amp_autocast, amp_scaler):
self.amp_autocast = amp_autocast
self.amp_scaler = amp_scaler
def enable_amp(self, amp_enable):
self.amp_enable = amp_enable
def train_on_batch(self, imgs_real, imgs_gen):
'''Performs one iteration of update for Discriminator, Generators and Classifier (Raw Split training)
imgs_real (torch.cuda.FloatTensor): mini-batch of real dataset images
imgs_gen (torch.cuda.FloatTensor): mini-batch of generated images
'''
self.gen_set.train()
self.disc.train()
#classification labels
labels_c = []
labels_c.append(self.Tensor([0]*(imgs_gen.shape[0]//2)))
labels_c.append(self.Tensor([1]*(imgs_gen.shape[0]//2)))
labels_c = torch.cat(labels_c, dim=0).type(torch.cuda.LongTensor)
#adversarial game labels
labels_d_valid = Variable(self.Tensor(imgs_real.shape[0], 1).fill_(1.0), requires_grad=False)
labels_d_fake = Variable(self.Tensor(imgs_gen.shape[0], 1).fill_(0.0), requires_grad=False)
labels_g_valid = Variable(self.Tensor(imgs_gen.shape[0], 1).fill_(1.0), requires_grad=False)
# --------------------
# Train Discriminator
# --------------------
self.optimizer_D.zero_grad()
with self.amp_autocast(self.amp_enable):
#gets real images loss/acc
validity = self.disc(imgs_real)
loss_disc_real = self.loss_disc(validity, labels_d_valid)
acc_disc_real = self.bin_accuracy(validity, labels_d_valid)
#gets generated images loss/acc
validity = self.disc(imgs_gen.detach())
loss_disc_fake = self.loss_disc(validity, labels_d_fake)
acc_disc_fake = self.bin_accuracy(validity, labels_d_fake)
#gets total loss for discriminator
loss_disc = loss_disc_fake + loss_disc_real
self.amp_scaler.scale(loss_disc).backward()
self.amp_scaler.step(self.optimizer_D)
# -----------------
# Train Classifier
# -----------------
self.optimizer_C.zero_grad()
with self.amp_autocast(self.amp_enable):
for par in self.feature_layers.parameters():
par.requires_grad_(False)
#gets classification loss/acc
classification = self.clasf(imgs_gen.detach())
loss_clasf = self.loss_clasf(classification, labels_c)
acc_clasf = self.categorical_accuracy(classification, labels_c)
for par in self.feature_layers.parameters():
par.requires_grad_(True)
self.amp_scaler.scale(loss_clasf).backward()
self.amp_scaler.step(self.optimizer_C)
# -----------------
# Train Generators
# -----------------
self.optimizer_G.zero_grad()
with self.amp_autocast(self.amp_enable):
#gets discriminative loss/acc
imgs_ft_gen = self.feature_layers(imgs_gen)
validity = self.disc(imgs_ft_gen, feature_input=True)
loss_gen_disc = self.loss_disc(validity, labels_g_valid)
#gets classification loss/acc
classification = self.clasf(imgs_ft_gen, feature_input=True)
            loss_gen_clasf = self.Tensor([0])
            if self.diversity_parameter_g > 0:
                loss_gen_clasf = self.loss_clasf(classification, labels_c)*self.diversity_parameter_g
            #gets total loss for generators (classification component already weighted above)
            loss_gen = loss_gen_disc + loss_gen_clasf
self.amp_scaler.scale(loss_gen).backward()
self.amp_scaler.step(self.optimizer_G)
#updates metrics dictionaries
self.metrics_dict['loss_disc_real'] = loss_disc_real.item()
self.metrics_dict['acc_disc_real'] = acc_disc_real.item()
self.metrics_dict['loss_disc_fake'] = loss_disc_fake.item()
self.metrics_dict['acc_disc_fake'] = acc_disc_fake.item()
self.metrics_dict['loss_gen_disc'] = loss_gen_disc.item()
self.metrics_dict['loss_gen_clasf'] = loss_gen_clasf.item()
self.metrics_dict['loss_clasf'] = loss_clasf.item()
self.metrics_dict['acc_clasf'] = acc_clasf.item()
return self.metrics_dict
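    # Usage sketch (assumption -- the actual epoch loop lives in the tree training code,
    # which is not part of this file): per batch, sample z of shape
    # (batch_size_gen, latent_dim), build imgs_gen = self.get_gen_images(z), and call
    # train_on_batch(imgs_real, imgs_gen); the returned metrics_dict can then be logged.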
def train_on_batch_refinement(self, imgs_real, imgs_gen_internal, imgs_gen_external=None, clasf_external=None):
'''Performs one iteration of update for internal discriminator, internal generator, and internal classifier,
also requiring external generator's data and external classifier (Refinement training)
imgs_real (torch.cuda.FloatTensor): mini-batch of real dataset images
imgs_gen_internal (torch.cuda.FloatTensor): mini-batch of generated images by the internal generator
imgs_gen_external (torch.cuda.FloatTensor): mini-batch of generated images by the external generator for internal classifier's training
clasf_external (torch.nn.Module): external classifier used by internal generator's training
'''
self.gen_set.train()
self.disc.train()
#classification labels
labels_c = []
labels_c.append(self.Tensor([0]*imgs_gen_internal.shape[0]))
labels_c.append(self.Tensor([1]*imgs_gen_external.shape[0]))
labels_c = torch.cat(labels_c, dim=0).type(torch.cuda.LongTensor)
#adversarial labels
labels_d_valid = Variable(self.Tensor(imgs_real.shape[0], 1).fill_(1.0), requires_grad=False)
labels_d_fake = Variable(self.Tensor(imgs_gen_internal.shape[0], 1).fill_(0.0), requires_grad=False)
labels_g_valid = Variable(self.Tensor(imgs_gen_internal.shape[0], 1).fill_(1.0), requires_grad=False)
# --------------------
# Train Discriminator
# --------------------
loss_disc_fake = self.Tensor([0])
loss_disc_real = self.Tensor([0])
acc_disc_real = self.Tensor([0])
acc_disc_fake = self.Tensor([0])
self.optimizer_D.zero_grad()
with self.amp_autocast(self.amp_enable):
#real images result
validity = self.disc(imgs_real)
loss_disc_real = self.loss_disc(validity, labels_d_valid)
acc_disc_real = self.bin_accuracy(validity, labels_d_valid)
#gen images result
validity = self.disc(imgs_gen_internal.detach())
loss_disc_fake = self.loss_disc(validity, labels_d_fake)
acc_disc_fake = self.bin_accuracy(validity, labels_d_fake)
#total loss
loss_disc = loss_disc_fake + loss_disc_real
self.amp_scaler.scale(loss_disc).backward()
self.amp_scaler.step(self.optimizer_D)
# -----------------
# Train Classifier
# -----------------
self.optimizer_C.zero_grad()
with self.amp_autocast(self.amp_enable):
for par in self.feature_layers.parameters():
par.requires_grad_(False)
#gets classification
classification_internal = self.clasf(imgs_gen_internal.detach())
classification_external = self.clasf(imgs_gen_external.detach())
classification_concat = torch.cat([classification_internal, classification_external])
#gets loss/acc
loss_clasf = self.loss_clasf(classification_concat, labels_c)
acc_clasf = self.categorical_accuracy(classification_concat, labels_c)
for par in self.feature_layers.parameters():
par.requires_grad_(True)
self.amp_scaler.scale(loss_clasf).backward()
self.amp_scaler.step(self.optimizer_C)
# -----------------
# Train Generators
# -----------------
loss_gen_disc = self.Tensor([0])
loss_gen_clasf = self.Tensor([0])
self.optimizer_G.zero_grad()
with self.amp_autocast(self.amp_enable):
#gets discriminative loss/acc
imgs_ft_gen_internal = self.feature_layers(imgs_gen_internal)
validity = self.disc(imgs_ft_gen_internal, feature_input=True)
loss_gen_disc = self.loss_disc(validity, labels_g_valid)
#gets discriminative loss/acc
classification_internal = self.clasf(imgs_ft_gen_internal, feature_input=True)
if clasf_external.feature_layers == self.clasf.feature_layers:
classification_external = clasf_external(imgs_ft_gen_internal, feature_input=True)
else:
classification_external = clasf_external(imgs_gen_internal)
classification_concat = torch.cat([classification_internal, classification_external] )
if self.diversity_parameter_g > 0:
loss_gen_clasf = self.loss_clasf(classification_concat, labels_c)*self.diversity_parameter_g
loss_gen = loss_gen_disc + loss_gen_clasf
self.amp_scaler.scale(loss_gen).backward()
self.amp_scaler.step(self.optimizer_G)
self.metrics_dict['loss_disc_real'] = loss_disc_real.item()
self.metrics_dict['acc_disc_real'] = acc_disc_real.item()
self.metrics_dict['loss_disc_fake'] = loss_disc_fake.item()
self.metrics_dict['acc_disc_fake'] = acc_disc_fake.item()
self.metrics_dict['loss_gen_disc'] = loss_gen_disc.item()
self.metrics_dict['loss_gen_clasf'] = loss_gen_clasf.item()
self.metrics_dict['loss_clasf'] = loss_clasf.item()
self.metrics_dict['acc_clasf'] = acc_clasf.item()
return self.metrics_dict
def get_gen_images(self, z, rand_perm=False):
return(self.gen_set(z, rand_perm=rand_perm))
def get_disc_losses_for_gen(self, imgs_gen_internal, no_generators=2):
self.disc.train()
batch_size = imgs_gen_internal.shape[0]//no_generators
fake = Variable(self.Tensor(batch_size, 1).fill_(0.0), requires_grad=False)
losses_for_gen = []
for i in range(no_generators):
imgs_gen_i = imgs_gen_internal[batch_size*i:batch_size*(i+1)]
with torch.no_grad():
validity = self.disc(imgs_gen_i.detach())
loss_fake = self.loss_disc(validity, fake).detach()
losses_for_gen.append(loss_fake.item())
return losses_for_gen
| 12,909 | 42.177258 | 143 | py |
HC-MGAN | HC-MGAN-main/models/models_28x28.py | import argparse
import os
from torch.autograd import Variable
import torch.nn as nn
import torch
from models.utils import verify_string_args, linear_block, Reshape, convT_block, conv_block
class Generator(nn.Module):
def __init__(self,
architecture = 'cnn',
nf=128,
kernel_size=4,
latent_dim = 100,
nc = 1,
print_shapes=False,
norm = 'no_norm'
):
super(Generator, self).__init__()
print_shapes = False
architecture_list = ['cnn', 'cnn_short']
normalization_list = ['no_norm']
verify_string_args(architecture, architecture_list)
verify_string_args(norm, normalization_list)
self.img_size = 28
self.architecture = architecture
self.nf = nf
self.kernel_size = kernel_size
self.latent_dim = latent_dim
self.nc = nc
self.norm = norm
#print('Generator normalization is ', self.norm)
gen_layers = []
if architecture == 'cnn' or architecture == 'cnn_short':
first_map_shape = 7
gen_layers += linear_block(self.latent_dim, nf*2*first_map_shape*first_map_shape, norm='no_norm', act=nn.ReLU(True))
gen_layers += Reshape(-1, nf*2, first_map_shape, first_map_shape),
gen_layers += convT_block(nf*2, nf, stride=2, padding=1, norm=self.norm, act=nn.ReLU(True))
gen_layers += convT_block(nf, nc, stride=2, padding=1, norm='no_norm', act=nn.Tanh())
else:
            raise ValueError('Architecture {} not implemented!'.format(architecture))
self.generate = nn.Sequential(*gen_layers)
if print_shapes:
input_tensor = torch.zeros(100,self.latent_dim)
output = input_tensor
print("\nGenerator ConvT Shapes:\n")
for i, ly in enumerate(self.generate):
output = self.generate[i](output)
if (type(ly) == torch.nn.modules.conv.ConvTranspose2d):
print('layer: {}'.format(i))
print(ly)
print('output shape: {}'.format(output.shape))
def forward(self, z):
img = self.generate(z)
if self.architecture == 'mlp':
img = img.view(-1,self.nc, self.img_size, self.img_size)
return img
class EncoderLayers(nn.Module):
def __init__(self,
architecture='cnn',
nf=128,
kernel_size=5,
norm = 'no_norm',
nc = 1,
print_shapes=True
):
super(EncoderLayers, self).__init__()
print_shapes = False
architecture_list = ['cnn', 'cnn_short']
normalization_list = ['layer_norm', 'spectral_norm', 'no_norm']
verify_string_args(architecture, architecture_list)
verify_string_args(norm, normalization_list)
self.img_size = 28
self.architecture = architecture
self.nf = nf
self.kernel_size = kernel_size
self.norm = norm
self.nc = nc
self.leaky_relu = nn.LeakyReLU(0.2, inplace=True)
#print('Normalization for conv layers is {}'.format(norm))
encoder_layers = []
if (architecture == 'cnn' or architecture == 'cnn_short'):
encoder_layers += conv_block(nc, nf, fmap_shape=[14, 14], norm=self.norm, act=self.leaky_relu, kernel_size=self.kernel_size)
encoder_layers += conv_block(nf, nf * 2, fmap_shape=[7, 7], norm=self.norm, act=self.leaky_relu, kernel_size=self.kernel_size)
encoder_layers += conv_block(nf * 2, nf * 4, fmap_shape=[4,4], norm=self.norm, act=self.leaky_relu, kernel_size=self.kernel_size)
else:
            raise ValueError('Architecture {} not implemented!'.format(architecture))
self.encoder_layers = nn.Sequential(*encoder_layers)
if print_shapes:
print("\nConv Features Shapes\n")
input_tensor = torch.zeros(100, self.nc, self.img_size, self.img_size)
output=input_tensor
if architecture == 'mlp':
output = input_tensor.view(100,-1)
for i, ly in enumerate(self.encoder_layers):
output = self.encoder_layers[i](output)
if (type(ly) == torch.nn.modules.conv.Conv2d and print_shapes):
print('layer: {}'.format(i))
print(ly)
print('output shape: {}'.format(output.shape))
self.total_units = output.view(input_tensor.shape[0], -1).shape[-1]
def forward(self, img):
img_input_dim = img.shape[-1]
if img_input_dim!=self.img_size:
raise Exception("This discriminator/classifier assumes image inputs with {} resolution and an input with {} resolution was received. Please choose a compatible model or data.".format(self.img_size, img_input_dim))
if self.architecture == 'mlp':
img = img.view(img.shape[0],-1)
return self.encoder_layers(img)
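# Usage sketch (illustrative values only): the encoder produces the shared conv features
# consumed by the Discriminator/Classifier heads defined in models_general; with the
# fmap shapes declared above, a 28x28 input is expected to come out as (batch, nf*4, 4, 4),
# and enc.total_units holds the flattened feature size computed at construction time.
#
#   enc = EncoderLayers(architecture='cnn', nf=128, nc=1)
#   feats = enc(torch.randn(8, 1, 28, 28))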
| 5,274 | 37.786765 | 225 | py |
HC-MGAN | HC-MGAN-main/models/__init__.py | 0 | 0 | 0 | py |
|
HC-MGAN | HC-MGAN-main/tree/tree.py | import torch
from tree.refinement import refinement
from tree.raw_split import raw_split
import numpy as np
import copy
import os
from utils.soft_cluster import view_global_tree_logs, show, view_global_tree_logs
from utils.others import save_log_text, remove_bold_from_string, print_save_log, get_log_heading
class Node:
def __init__(self,
name,
cluster_probs,
tree_path,
parent=None,
child_left=None,
child_right=None,
node_status = "root node"):
self.name = name
self.cluster_probs = [cluster_probs]
self.parent = parent
self.child_left = child_left
self.child_right = child_right
self.tree_path = tree_path
self.node_path = os.path.join(tree_path, name)
self.status = node_status
        self.skipped_refinements = False
def add_cluster_probs(self, cluster_probs):
self.cluster_probs.append(cluster_probs)
def create_children(self, cluster_probs_left, cluster_probs_right):
self.child_left = Node(self.name + 'L', torch.Tensor(cluster_probs_left), tree_path=self.tree_path, parent=self, node_status='raw split')
self.child_right = Node(self.name + 'R', torch.Tensor(cluster_probs_right), tree_path=self.tree_path, parent=self, node_status='raw split')
def get_leaf_nodes_list(root_node):
leaf_nodes_list = []
if (root_node.child_left is None) and (root_node.child_right is None):
return [root_node]
else:
if root_node.child_left is not None:
leaf_nodes_list += (get_leaf_nodes_list(root_node.child_left))
if root_node.child_right is not None:
leaf_nodes_list += (get_leaf_nodes_list(root_node.child_right))
return leaf_nodes_list
def get_non_leaf_nodes_list(root_node):
if (root_node.child_left is None) and (root_node.child_right is None):
return []
else:
non_leaf_nodes_list = [root_node]
if root_node.child_left is not None:
non_leaf_nodes_list += (get_non_leaf_nodes_list(root_node.child_left))
if root_node.child_right is not None:
non_leaf_nodes_list += (get_non_leaf_nodes_list(root_node.child_right))
return non_leaf_nodes_list
def get_node_by_name(root_node, name):
leaf_nodes_list = get_leaf_nodes_list(root_node)
non_leaf_nodes_list = get_non_leaf_nodes_list(root_node)
for node in leaf_nodes_list:
if node.name == name:
print("Node '{}' was found".format(name))
return node
for node in non_leaf_nodes_list:
if node.name == name:
print("Node '{}' was found".format(name))
return node
print("Node '{}' was not found".format(name))
return None
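# Minimal sketch of the node helpers above (hypothetical names and probabilities):
# a raw split turns one leaf into two children whose soft-cluster probabilities
# partition the parent's probability mass.
#
#   root = Node('0', torch.ones(6), tree_path='logs/tree_example')
#   root.create_children(cluster_probs_left=[0.7]*6, cluster_probs_right=[0.3]*6)
#   [n.name for n in get_leaf_nodes_list(root)]       # -> ['0L', '0R']
#   [n.name for n in get_non_leaf_nodes_list(root)]   # -> ['0']
#   get_node_by_name(root, '0R')                      # -> the right child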
def search_node_to_split(root_node, text_logs_path):
log_headings = get_log_heading("SEARCHING NEXT LEAF NODE TO SPLIT", spacing=2)
print_save_log('\n\n\n' + log_headings, text_logs_path)
leaf_nodes_list = get_leaf_nodes_list(root_node)
prob_mass_per_leaf = [leaf.cluster_probs[-1].sum() for leaf in leaf_nodes_list]
split_node = leaf_nodes_list[np.argmax(prob_mass_per_leaf)]
print_save_log('Currently {} leaf nodes obtained: '.format(len(leaf_nodes_list)), text_logs_path)
print_save_log([(node.name, '{} prob. mass'.format(node.cluster_probs[-1].sum())) for node in leaf_nodes_list], text_logs_path)
log = 'Selecting for split leaf node {} (prob. mass {}) following the greatest prob. mass criteria.'.format(split_node.name, split_node.cluster_probs[-1].sum())
print_save_log(log, text_logs_path)
return split_node
def raw_split_tree_node(args, node_k, dataloader_train, halt_epoch= 20, collapse_check_loss=0.01, save_node_path=None):
dataloader_cluster_k = copy.deepcopy(dataloader_train)
dataloader_cluster_k.sampler.weights = node_k.cluster_probs[-1]
if node_k.node_path is not None:
os.makedirs(node_k.node_path, exist_ok=True)
trainer_raw_split = raw_split(args, dataloader_cluster_k, node_k, epochs=args.epochs_raw_split,
noise_start= args.noise_start, sample_interval = args.sample_interval,
collapse_check_loss=collapse_check_loss)
#if save_node_path is not None:
# np.save(save_node_path, node_k)
return node_k
def check_stop_refinement_condition(node, text_logs_path, min_prob_mass_variation = 150):
if len(node.child_left.cluster_probs)>=3:
prob_mass_variation = (node.child_left.cluster_probs[-1].numpy() - node.child_left.cluster_probs[-2].numpy())
prob_mass_variation = np.abs(prob_mass_variation).sum()
log_headings = get_log_heading("CHECKING CONDITION FOR CONTINUING REFINEMENTS FOR NODE {}".format(node.name), spacing=2)
print_save_log('\n\n\n' + log_headings, text_logs_path)
print_save_log("Condition for continuing refinements: total prob mass variation between the last 2 refinements must be > {}.".format(min_prob_mass_variation), text_logs_path)
print_save_log("(As a heuristic to save up time, we assume negligible variation indicates a clustering local minimum unlikely to change with more refinements)", text_logs_path)
print_save_log('The variation of prob. mass for the last 2 refinemnets is: {:.2f}.'.format(prob_mass_variation), text_logs_path)
if prob_mass_variation < min_prob_mass_variation:
print_save_log('Canceling next refinements for this node.', text_logs_path)
return True
else:
print_save_log('Continuing next refinements for this node. ', text_logs_path)
return False
else:
return False
def refine_tree_nodes(args, node_k, dataloader_train, ith_refinement, no_refinements, halt_epoch = 20, collapse_check_loss=0.01, save_node_path=None):
ith_refinement = len(node_k.child_left.cluster_probs)
dataloader_cluster_l = copy.deepcopy(dataloader_train)
dataloader_cluster_m = copy.deepcopy(dataloader_train)
dataloader_cluster_l.sampler.weights = node_k.child_left.cluster_probs[-1]
dataloader_cluster_m.sampler.weights = node_k.child_right.cluster_probs[-1]
trainer_ref= refinement(args, dataloader_cluster_l, dataloader_cluster_m, epochs=args.epochs_refinement,
noise_start= args.noise_start, ref_it=ith_refinement,
sample_interval=args.sample_interval, collapse_check_loss=collapse_check_loss,
node_k = node_k, print_vars=False)
dataloader_cluster_l.sampler.weights = node_k.child_left.cluster_probs[-1]
dataloader_cluster_m.sampler.weights = node_k.child_right.cluster_probs[-1]
#if save_node_path is not None:
# np.save(save_node_path, node_k)
return node_k
def grow_tree_from_root(root_node, dataloader_train, args):
os.makedirs(args.logs_path, exist_ok=True)
text_logs_path = os.path.join(args.logs_path, "global_tree_logs.txt")
save_log_text('', text_logs_path, open_mode='w')
for i in range(args.no_splits):
split_node = search_node_to_split(root_node, text_logs_path=text_logs_path)
split_node = raw_split_tree_node(args, split_node, dataloader_train, save_node_path='root_node')
non_leaf_list = get_non_leaf_nodes_list(root_node)
leaf_list = get_leaf_nodes_list(root_node)
log_title_raw_split = 'GLOBAL TREE LOGS AFTER RAW SPLIT OF NODE {}'.format(split_node.name)
view_global_tree_logs(dataloader_train, non_leaf_list, leaf_list, text_logs_path, log_title=log_title_raw_split)
for j in range(args.no_refinements):
stop_refinement_flag = check_stop_refinement_condition(split_node, text_logs_path, args.min_prob_mass_variation)
if not(stop_refinement_flag):
split_node = refine_tree_nodes(args, split_node, dataloader_train, ith_refinement=j, no_refinements=args.no_refinements, save_node_path='root_node')
if j == args.no_refinements-1:
log_headings = get_log_heading("END OF RIFENEMENTS FOR NODE {} SPLIT".format(split_node.name), spacing=2)
print_save_log("\n\n\n" + log_headings, text_logs_path)
print_save_log("{}/{} refinements concluded.".format(args.no_refinements, args.no_refinements), text_logs_path)
non_leaf_list = get_non_leaf_nodes_list(root_node)
leaf_list = get_leaf_nodes_list(root_node)
log_title_ref = 'GLOBAL TREE LOGS AFTER REFINEMENT {} OF NODE {} SPLIT'.format(j+1, split_node.name)
view_global_tree_logs(dataloader_train, non_leaf_list, leaf_list, text_logs_path, log_title=log_title_ref)
else:
split_node.child_left.status += ", skipped {}".format(args.no_refinements-j)
split_node.child_right.status += ", skipped {}".format(args.no_refinements-j)
#np.save('root_node', root_node)
log_headings = get_log_heading("END OF RIFINEMENTS FOR NODE {} SPLIT".format(split_node.name), spacing=2)
print_save_log("\n\n\n" + log_headings, text_logs_path)
print_save_log("{}/{} refinements concluded. Remaining {} refinements skipped due to negligible variation.".format(
j, args.no_refinements, args.no_refinements-j), text_logs_path)
break
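# Driver sketch (placeholder names; args is assumed to be the argparse namespace used
# throughout this repo): the tree is grown by repeatedly selecting the heaviest leaf,
# raw-splitting it with a two-generator MGAN, and refining the split with paired GANs.
#
#   root = Node('0', torch.ones(len(dataloader_train.dataset)), tree_path=args.logs_path)
#   grow_tree_from_root(root, dataloader_train, args)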
| 9,572 | 49.920213 | 184 | py |
HC-MGAN | HC-MGAN-main/tree/refinement.py | #basic imports
import argparse
import os
import numpy as np
import math
import shutil
import time
import datetime
import copy
import sys
#torch imports
import torchvision.transforms as transforms
from torchvision.utils import save_image, make_grid
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torch
#plot imports
import pandas as pd
import matplotlib.pyplot as plt
#from tabulate import tabulate
#other imports
from tqdm import autonotebook
from sklearn import metrics
#custom imports
from utils.soft_cluster import get_classification_table_variation
from utils.soft_cluster import show, distribution_select
from utils.others import create_gans, sum_dicts, zero_dict_values, save_log_text, get_log_heading, get_bold_string, print_save_log
try:
from IPython.display import Image
except:
print('Jupyter image display not available')
class GANGroupsTrainer:
def __init__(self,
dataloader_l, #dataloader object
dataloader_m, #dataloader object
gan_l, #gan object
gan_m, #gan object
amp_enable, #bool
prior_distribution = 'uniform',
node_k = None
):
self.dl_l = dataloader_l
self.dl_m = dataloader_m
self.gan_l = gan_l
self.gan_m = gan_m
self.no_c_outputs = 2
self.amp_scaler = torch.cuda.amp.GradScaler()
self.amp_autocast = torch.cuda.amp.autocast
self.amp_enable = amp_enable
self.cancel_training_flag = False
self.class_to_idx_dict = dataloader_l.dataset.class_to_idx.items()
self.idx_to_class_dict = {v:k for k,v in self.class_to_idx_dict}
self.prior_distribution = prior_distribution
self.node_k = node_k
self.classifiers = [self.gan_l.clasf, self.gan_m.clasf]
self.refresh_clustering_attributes()
self.Tensor = torch.cuda.FloatTensor
self.node_l = self.node_k.child_left
self.node_m = self.node_k.child_right
batch_size = dataloader_l.batch_size
self.no_batches = int(dataloader_l.sampler.weights.sum().item() + dataloader_m.sampler.weights.sum().item())//(batch_size*2)
self.gan_l.assign_amp(self.amp_autocast, self.amp_scaler)
self.gan_l.enable_amp(self.amp_enable)
self.gan_m.assign_amp(self.amp_autocast, self.amp_scaler)
self.gan_m.enable_amp(self.amp_enable)
def refresh_cluster_probs_per_example(self):
self.cluster_probs_per_example = [self.dl_l.sampler.weights.numpy(),
self.dl_m.sampler.weights.numpy()]
def refresh_cluster_prob_mass_per_class(self):
self.cluster_prob_mass_per_class = self.get_cluster_prob_mass_per_class(
self.dl_l, self.cluster_probs_per_example)
def refresh_cluster_assignments_per_class(self):
self.cluster_assignments_per_class = self.get_cluster_assignments_per_class(
self.dl_l, self.cluster_probs_per_example)
def refresh_classes_for_monitoring(self):
clusters_per_class_sum = [np.sum(clusters)
for clusters in self.cluster_prob_mass_per_class]
classes = self.dl_l.dataset.classes
targets_per_example = self.dl_l.dataset.targets
examples_per_class_sum = [np.sum(np.array(targets_per_example)==i)
for i in range(len(classes))]
self.classes_for_monitoring = [i for (i, sum_value) in enumerate(clusters_per_class_sum)
if sum_value > examples_per_class_sum[i] * 0.2]
if len(self.classes_for_monitoring) < 2:
print('Classes for metrics monitoring were set to {}, '
'which is too few (2 or more classes required)'.format(
self.classes_for_monitoring))
print('This means clusters are too small '
'(prob. mass for classes < 20% of original mass at root node).')
print('Enabling all classes for metrics monitoring.')
self.classes_for_monitoring = np.arange(len(classes)).tolist()
def refresh_clustering_attributes(self):
self.refresh_cluster_probs_per_example()
self.refresh_cluster_prob_mass_per_class()
self.refresh_cluster_assignments_per_class()
self.refresh_classes_for_monitoring()
def train(self, epochs, text_logs_path, refinement_path, noise_start=0, sample_interval=20,
collapse_check_loss=0.001, collapse_check_epoch=0, batch_size_gen=100, ref_it=0, ref_attempt=1, no_refinements=0):
'''Main training loop.
Args:
epochs (int): total training epochs
text_logs_path (string): .txt file to save textual training logs
refinement_path (string): path to refinement folder where logs will be stored
noise_start (float): Start image noise intensity linearly decaying throughout the training
sample_interval (int): interval for sample logs printing/saving
collapse_check_loss (float): threshold discriminator loss for detecting collapsed generators and halting the training
            batch_size_gen (int): number of samples per minibatch for generated images
ref_it (int): no. of iteration of refinement for printing/saving logs
ref_attempt (int): no. of attempt for a given refinement it. (counts +1 if previous attempt was halted due to generators collapse)
'''
self.refresh_clustering_attributes()
self.cancel_training_flag = False
print("\n\nTraining epochs progress bar (training logs printed/saved every {} epochs):".format(sample_interval))
for epoch in autonotebook.tqdm(range(1, epochs+1)):
img_noise_scale = noise_start*(1-epoch/epochs)
epoch_start = time.time()
#running losses/acc dictionary
epoch_metrics_dict_l = zero_dict_values(copy.copy(self.gan_l.metrics_dict))
epoch_metrics_dict_m = zero_dict_values(copy.copy(self.gan_m.metrics_dict))
dicts = self.train_on_epoch(epoch_metrics_dict_l, epoch_metrics_dict_m, img_noise_scale, batch_size_gen)
epoch_metrics_dict_l, epoch_metrics_dict_m = dicts
epoch_interval = time.time() - epoch_start
#logs
if (epoch % sample_interval) == 0:
self.view_log_headings(epoch, epochs, epoch_interval, text_logs_path, ref_it=ref_it, ref_attempt=ref_attempt)
self.view_epoch_losses(epoch_metrics_dict_l, epoch_metrics_dict_m, text_logs_path)
self.view_gen_imgs(epoch, ref_attempt, refinement_path, text_logs_path)
self.verify_collapsed_generators(epoch, text_logs_path, img_noise_scale, collapse_check_loss, collapse_check_epoch=collapse_check_epoch)
#flag for cancelling training if generators collapses is detected
if self.cancel_training_flag:
break
#prints end of training logs
end_of_training_logs = "END OF REFINEMENT TRAINING FOR NODE {} SPLIT".format(self.node_k.name)
print_save_log("\n\n"+get_log_heading(end_of_training_logs), text_logs_path)
print_save_log("End of training.", text_logs_path)
if not(self.cancel_training_flag):
            #gets cluster assignment probabilities for each example, averaging the 2 classifiers' results
clasf_cluster_probs = self.get_clasf_cluster_probs(self.dl_l, self.classifiers, img_noise_scale)
# updates children with new refined clasf cluster probs
self.node_k.child_left.add_cluster_probs(torch.Tensor(clasf_cluster_probs[0]))
self.node_k.child_left.status = "{}/{} refinements".format(ref_it, no_refinements)
self.node_k.child_right.add_cluster_probs(torch.Tensor(clasf_cluster_probs[1]))
self.node_k.child_right.status = "{}/{} refinements".format(ref_it, no_refinements)
#end of training logs with refined binary clustering for current node
new_cluster_prob_mass_per_class = self.get_cluster_prob_mass_per_class(self.dl_l, clasf_cluster_probs)
self.view_new_clasf_clustering(new_cluster_prob_mass_per_class, ref_it, text_logs_path)
def train_on_epoch(self, epoch_metrics_dict_l, epoch_metrics_dict_m, img_noise_scale, batch_size_gen=100):
gan_l = self.gan_l
gan_m = self.gan_m
for batch_idx in range(self.no_batches):
#samples real images from groups l and m
imgs_real_l, imgs_real_m = self.get_real_images(img_noise_scale)
#Trains group l components with needed external data/components from group m
imgs_gen_l, imgs_gen_m = self.get_gen_images(img_noise_scale, batch_size_gen)
batch_metrics_dict_l = gan_l.train_on_batch_refinement(imgs_real = imgs_real_l,
imgs_gen_internal = imgs_gen_l,
imgs_gen_external = imgs_gen_m,
clasf_external = gan_m.clasf)
epoch_metrics_dict_l = sum_dicts(epoch_metrics_dict_l, batch_metrics_dict_l)
#Trains group m components with needed external data/components from group l
imgs_gen_l, imgs_gen_m = self.get_gen_images(img_noise_scale, batch_size_gen)
batch_metrics_dict_m = gan_m.train_on_batch_refinement(imgs_real = imgs_real_m,
imgs_gen_internal = imgs_gen_m,
imgs_gen_external = imgs_gen_l,
clasf_external = gan_l.clasf)
epoch_metrics_dict_m = sum_dicts(epoch_metrics_dict_m, batch_metrics_dict_m)
#updates amp scaler after training components from groups l and m
self.amp_scaler.update()
return epoch_metrics_dict_l, epoch_metrics_dict_m
def get_real_images(self, img_noise_scale):
'''Gets real images from groups l and m'''
imgs_real_l = next(iter(self.dl_l))[0].type(self.Tensor)
imgs_real_l = self._add_noise(imgs_real_l, img_noise_scale)
imgs_real_m = next(iter(self.dl_m))[0].type(self.Tensor)
imgs_real_m = self._add_noise(imgs_real_m, img_noise_scale)
return imgs_real_l, imgs_real_m
def get_gen_images(self, img_noise_scale, batch_size=100):
'''Generates imgs from each gan (already concatenated per gan)'''
latent_dim = self.gan_l.latent_dim
z = self.Tensor(distribution_select(self.prior_distribution, (batch_size, latent_dim))).requires_grad_(False)
imgs_gen_l = self.gan_l.get_gen_images(z, rand_perm=True)
imgs_gen_l = self._add_noise(imgs_gen_l, img_noise_scale)
imgs_gen_m = self.gan_m.get_gen_images(z, rand_perm=True)
imgs_gen_m = self._add_noise(imgs_gen_m, img_noise_scale)
return imgs_gen_l, imgs_gen_m
def get_cluster_prob_mass_per_class(self, dataloader, cluster_probs_per_example):
no_of_clusters = 2
assert(len(cluster_probs_per_example) == no_of_clusters)
no_of_classes = len(dataloader.dataset.classes)
prob_mass_per_class = []
for i in range(no_of_classes):
prob_mass_ij = []
for j in range(no_of_clusters):
prob_mass_ij.append( ((np.array(dataloader.dataset.targets)==i)*cluster_probs_per_example[j]).sum().item() )
prob_mass_per_class.append(prob_mass_ij)
return np.round(prob_mass_per_class, 2)
def get_cluster_assignments_per_class(self, dataloader, cluster_probs_per_example):
no_of_clusters = 2
assert(len(cluster_probs_per_example) == no_of_clusters)
no_of_classes = len(dataloader.dataset.classes)
cluster_assignments_per_class = []
for i in range(no_of_classes):
cluster_counts_ij = []
for j in range(no_of_clusters):
cluster_counts_ij.append( ((np.array(dataloader.dataset.targets)==i)*(cluster_probs_per_example[j])>0.5).sum().item() )
cluster_assignments_per_class.append(cluster_counts_ij)
return cluster_assignments_per_class
def get_clasf_cluster_probs(self, dataloader, classifiers, img_noise_scale=0):
'''Performs cluster inference over the entire training set w/ the 2 classifiers.
Returns the avg. cluster probabilities between the 2 classifiers for each training example.'''
dataloader=torch.utils.data.DataLoader(dataloader.dataset, batch_size=100, shuffle=False, drop_last=False)
#empty sublists to accumulate the minibatches of probabilities
clasf_cluster_probs = [ [[] for _ in range(self.no_c_outputs)] for clasf in classifiers ]
#iterates through the dataset to collect classifiers inference with minibatches
for (batch_imgs, batch_targets) in dataloader:
batch_imgs = batch_imgs.cuda()
batch_imgs = self._add_noise(batch_imgs, img_noise_scale)
with torch.no_grad():
clasf_cluster_probs_batch = [torch.exp(clasf(batch_imgs)).transpose(1,0) for clasf in classifiers]
for i in range(len(classifiers)):
for j in range(self.no_c_outputs):
clasf_cluster_probs[i][j].append(clasf_cluster_probs_batch[i][j])
#concatenates results for each batch of the whole data
clasf_cluster_probs = [[torch.cat(batch).cpu().numpy() for batch in classifier_i_batches]
for classifier_i_batches in clasf_cluster_probs]
#gets the average between the two classifiers' cluster probabilities
clasf_cluster_probs_avg = np.array([(clasf_cluster_probs[0][0] + clasf_cluster_probs[1][1])/2,
(clasf_cluster_probs[0][1] + clasf_cluster_probs[1][0])/2])
#gets parent node (k) probabilities by summing previous probabilities in l and m
parent_cluster_probs = (self.dl_l.sampler.weights+self.dl_m.sampler.weights).numpy()
#multiplies by the parent node`s probabilities
clasf_cluster_probs_avg[0] *= parent_cluster_probs
clasf_cluster_probs_avg[1] *= parent_cluster_probs
clasf_cluster_probs_avg = clasf_cluster_probs_avg.tolist()
return clasf_cluster_probs_avg
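    # Note on the averaging above (illustrative numbers): the two classifiers label the
    # groups in opposite order (group l is output 0 of clasf_l but output 1 of clasf_m).
    # For a single example, if clasf_l yields (0.8, 0.2) and clasf_m yields (0.3, 0.7),
    # the averaged probability of group l is (0.8 + 0.7) / 2 = 0.75 and of group m is
    # (0.2 + 0.3) / 2 = 0.25; both are then rescaled by the parent node's soft-cluster
    # probability so that children never exceed the parent's probability mass.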
def _plot_img_grid(self, imgs_plot, img_save_path, node_name, text_logs_path):
if imgs_plot.shape[1] == 3 or imgs_plot.cpu().shape[1] == 1:
grid = make_grid(imgs_plot, nrow=20, normalize=True)
if img_save_path is not None:
save_image(grid, img_save_path)
try:
print_save_log("\nSample of generated images from group {}:".format(node_name), text_logs_path)
print_save_log('(This sample is saved at {})'.format(img_save_path), text_logs_path)
display(Image(filename=img_save_path, width=900))
except:
print_save_log('Jupyter image display not available for plotting sample of generated images from group {}'.format(node_name), text_logs_path)
#print_save_log("Sample of generated images from group {} saved at {}".format(node_name, img_save_path), text_logs_path)
else:
print_save_log("\nNo image save path defined, can't save sample of generated images", text_logs_path)
else:
print_save_log("\nCan't plot/save imgs with shape {}".format(imgs_plot.shape), text_logs_path)
def view_log_headings(self, epoch, epochs, epoch_interval, text_logs_path, ref_it=-1, ref_attempt=-1):
'''Part 1/4 of training logs'''
log_headings = "[REFINEMENT %d OF NODE %s SPLIT] [EPOCH %d/%d] [EPOCH TIME INTERVAL: %.2f sec.] [REF %d] [ATTEMPT %d]"%(ref_it, self.node_k.name,
epoch, epochs, epoch_interval, ref_it, ref_attempt)
log_headings = get_log_heading(log_headings)
print_save_log("\n\n" + log_headings, text_logs_path)
def view_epoch_losses(self, epoch_metrics_dict_l, epoch_metrics_dict_m, text_logs_path):
'''part 2/4 of training logs'''
print_save_log("Mean epoch losses/acc for each component in group l's GAN", text_logs_path)
print_save_log({k:np.round(v/self.no_batches,5) for k,v in epoch_metrics_dict_l.items()}, text_logs_path)
print_save_log("Mean epoch losses/acc for each component in group m's GAN:", text_logs_path)
print_save_log({k:np.round(v/self.no_batches,5) for k,v in epoch_metrics_dict_m.items()}, text_logs_path)
def view_gen_imgs(self, epoch, ref_attempt, refinement_path, text_logs_path):
'''part 3/4 of training logs'''
imgs_plot_l, imgs_plot_m = self.get_gen_images(img_noise_scale=0, batch_size=10)
if self.node_k is not None:
if refinement_path is not None:
img_save_path_l = refinement_path + "attempt_{}_ep_{}_{}.jpg".format(ref_attempt, epoch, self.node_l.name)
img_save_path_m = refinement_path + "attempt_{}_ep_{}_{}.jpg".format(ref_attempt, epoch, self.node_m.name)
self._plot_img_grid(imgs_plot_l, img_save_path_l, "l", text_logs_path)
self._plot_img_grid(imgs_plot_m, img_save_path_m, "m", text_logs_path)
def verify_collapsed_generators(self, epoch, text_logs_path, img_noise_scale=0, collapse_check_loss=0.01, collapse_check_epoch=50, batch_size=100):
'''part 4/4 of training logs'''
if epoch < collapse_check_epoch:
print_save_log("\nGenerator collapse will be checked after epoch {}".format(collapse_check_epoch), text_logs_path)
else:
print_save_log("\nChecking if generators have collapsed...", text_logs_path)
imgs_gen_l, imgs_gen_m = self.get_gen_images(img_noise_scale, batch_size)
losses_l = self.gan_l.get_disc_losses_for_gen(imgs_gen_l)
losses_m = self.gan_m.get_disc_losses_for_gen(imgs_gen_m)
for loss in losses_l + losses_m:
if loss < collapse_check_loss and epoch>=collapse_check_epoch:
log_string = "\nDiscriminator loss for generated images is too low (<{}), indicating generators collapse. The training shall restart.".format(collapse_check_loss)
print_save_log(log_string, text_logs_path)
self.cancel_training_flag = True
break
if not(self.cancel_training_flag):
print_save_log("Generators collapse not found, the training shall continue.", text_logs_path)
def view_new_clasf_clustering(self, new_cluster_prob_mass_per_class, ref_it, text_logs_path):
'''Prints logs with refined binary clustering result for current nodes'''
#header
log_headings = "REFINED BINARY CLUSTERING FOR NODE {} SPLIT OBTAINED WITH AVG CLASSIFIER'S INFERENCE".format(self.node_k.name)
log_headings = get_log_heading(log_headings)
print_save_log("\n\n"+log_headings, text_logs_path)
#clustering table
print_save_log("Local binary soft clustering (prob. mass division) for node {} split after refinement, according to each class.".format(self.node_k.name), text_logs_path)
print_save_log('Probability mass variation since last refinement or raw split is indicated in parenthesis for each cluster and class.', text_logs_path)
print('(This table is saved at {})'.format(text_logs_path))
table_df = get_classification_table_variation(self.cluster_prob_mass_per_class, new_cluster_prob_mass_per_class, self.idx_to_class_dict,
node=self.node_k, table_name = 'Local split soft clusters refined')
pd.set_option("max_colwidth", None)
pd.set_option('max_columns', None)
try:
display(table_df)
except:
print(table_df)
print_save_log(str(table_df), text_logs_path, print_log=False)
def _add_noise(self, tensor, normal_std_scale):
if (normal_std_scale > 0):
return tensor + (tensor*torch.randn_like(tensor)*normal_std_scale)
else:
return tensor
def refinement(args, dataloader_l, dataloader_m, epochs, noise_start, ref_it = -1, sample_interval=10, collapse_check_loss=0.001,
save=False, node_k=None, print_vars=False):
redo_training = True
ref_attempt = 1
max_attempts = 4
while redo_training:
#configure log saving paths
refinement_path = os.path.join(node_k.node_path, "refinement_{}/".format(ref_it))
os.makedirs(refinement_path, exist_ok=True)
text_logs_path = refinement_path + "attempt_{}_training_logs.txt".format(ref_attempt)
save_log_text('', text_logs_path, open_mode='w')
#print main log headings
        log_headings = 'REFINEMENT {} OF NODE {} SPLIT (ATTEMPT {})'.format(ref_it, node_k.name, ref_attempt)
log_headings = get_log_heading(log_headings, spacing=2)
print_save_log('\n\n\n' + log_headings, text_logs_path)
#print parameters
log_headings = get_log_heading("TRAINING PARAMETERS")
print_save_log(log_headings, text_logs_path)
print_save_log("Training Arguments: ", text_logs_path)
print_save_log(vars(args), text_logs_path)
print_save_log("Training using device : {}".format(args.device), text_logs_path)
print_save_log("Training logs save path: {}".format(text_logs_path), text_logs_path)
print_save_log("Limit of Training Attempts: {}".format(max_attempts), text_logs_path)
#create MGAN models
[gan_l, gan_m] = create_gans(args, no_gans=2)
#print models' architecture
log_headings = get_log_heading("MODELS ARCHITETURE")
print_save_log('\n\n' + log_headings, text_logs_path)
print_save_log("Discriminator Architecture:", text_logs_path)
print_save_log(gan_l.disc, text_logs_path)
print_save_log("\nGernerator Architecture:", text_logs_path)
print_save_log(gan_l.gen_set.paths[0], text_logs_path)
print_save_log("\nClassifier Architecture:", text_logs_path)
print_save_log(gan_l.clasf, text_logs_path)
trainer = GANGroupsTrainer(dataloader_l,
dataloader_m,
gan_l,
gan_m,
amp_enable=args.amp_enable,
prior_distribution = "uniform",
node_k = node_k)
trainer.train(epochs = epochs,
text_logs_path=text_logs_path,
refinement_path = refinement_path,
noise_start=noise_start,
collapse_check_loss=collapse_check_loss,
collapse_check_epoch=args.collapse_check_epoch,
sample_interval=sample_interval,
ref_it=ref_it,
batch_size_gen=args.batch_size_gen,
ref_attempt = ref_attempt,
no_refinements=args.no_refinements)
#flag for restarting the training if generation collapse is detected
if trainer.cancel_training_flag == False:
redo_training = False
else:
ref_attempt += 1
if ref_attempt>max_attempts:
max_attempt_log_headings = get_log_heading("LIMIT OF {} FAILED ATTEMPTS REACHED".format(max_attempts))
max_attempt_log = "The training for this refinement reached the limit of {} failed attempts due generation collapse.".format(max_attempts)
max_attempt_log += " Please, select more stable tunnings for the models so that the generation stops collapsing."
print_save_log("\n\n" + max_attempt_log_headings, text_logs_path)
print_save_log(max_attempt_log, text_logs_path)
sys.exit(max_attempt_log)
return trainer
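# Usage sketch (names follow tree/tree.py; values are placeholders): one refinement
# iteration re-trains a GAN per child cluster and appends the refined soft-cluster
# probabilities to node_k's children.
#
#   trainer = refinement(args, dataloader_cluster_l, dataloader_cluster_m,
#                        epochs=args.epochs_refinement, noise_start=args.noise_start,
#                        ref_it=1, sample_interval=args.sample_interval,
#                        collapse_check_loss=0.01, node_k=split_node)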
| 24,678 | 52.417749 | 182 | py |
HC-MGAN | HC-MGAN-main/tree/raw_split.py | #basic imports
import argparse
import os
import numpy as np
import math
import shutil
import time
import datetime
import copy
import sys
#torch imports
import torchvision.transforms as transforms
from torchvision.utils import save_image, make_grid
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torch
#plot imports
#import seaborn as sn
import pandas as pd
import matplotlib.pyplot as plt
#from tabulate import tabulate
#other imports
from tqdm import autonotebook
from sklearn import metrics
#import nbimporter
#custom imports
from utils.soft_cluster import get_local_cluster_table, show, distribution_select
from utils.others import create_gans, sum_dicts, save_log_text, get_log_heading, print_save_log, zero_dict_values
try:
from IPython.display import Image
except:
print('Jupyter image display not available')
class MGANTrainer:
def __init__(self,
dataloader_cluster_k, #dataloader object
mgan_k, #mgan object
amp_enable, #bool
prior_distribution = 'uniform',
node_k = None,
feat_extractor = None
):
self.dl_k = dataloader_cluster_k
self.mgan_k = mgan_k
self.latent_dim = mgan_k.latent_dim
self.no_c_outputs = mgan_k.clasf.linear_clasf.out_features
self.amp_scaler = torch.cuda.amp.GradScaler()
self.amp_autocast = torch.cuda.amp.autocast
self.amp_enable = amp_enable
self.cancel_training_flag = False
self.class_to_idx_dict = dataloader_cluster_k.dataset.class_to_idx.items()
self.idx_to_class_dict = {v:k for k,v in self.class_to_idx_dict}
self.classes_names_dict = {v:k for k,v in dataloader_cluster_k.dataset.class_to_idx.items()}
self.classes_targets_groups = [ dataloader_cluster_k.dataset.class_to_idx[class_name] for class_name in dataloader_cluster_k.dataset.classes]
self.prior_distribution = prior_distribution
self.node_k = node_k
self.feat_extractor = feat_extractor
self.mgan_k.assign_amp(self.amp_autocast, self.amp_scaler)
self.mgan_k.enable_amp(self.amp_enable)
CUDA = True if torch.cuda.is_available() else False
self.Tensor = torch.cuda.FloatTensor if CUDA else torch.FloatTensor
self.no_batches = int(dataloader_cluster_k.sampler.weights.sum().item())//dataloader_cluster_k.batch_size
def train(self, epochs, text_logs_path, raw_split_path, noise_start=0, sample_interval=20, collapse_check_loss=0.001, collapse_check_epoch=0,
batch_size_gen=100, raw_split_attempt=0):
'''Main training loop.
Args:
epochs (int): total training epochs
text_logs_path (string): .txt file to save textual training logs
raw_split_path (string): path to raw split folder where logs will be stored
noise_start (float): Start image noise intensity linearly decaying throughout the training
sample_interval (int): interval for sample logs printing/saving
collapse_check_loss (float): threshold discriminator loss for detecting collapsed generators and halting the training
            batch_size_gen (int): no. of samples per minibatch for generated images
            raw_split_attempt (int): no. of the current raw split attempt (counts +1 if a previous attempt was halted due to generators collapse)
'''
self.cancel_training_flag = False
print("\n\nTraining epochs progress bar (training logs printed/saved every {} epochs):".format(sample_interval))
for epoch in autonotebook.tqdm(range(1, epochs+1)):
img_noise_scale = noise_start*(1-epoch/epochs)
epoch_start = time.time()
#running losses/acc dictionary
epoch_metrics_dict = zero_dict_values(copy.copy(self.mgan_k.metrics_dict))
epoch_metrics_dict = self.train_on_epoch(epoch_metrics_dict, img_noise_scale, batch_size_gen)
epoch_interval = time.time() - epoch_start
#logs
if (epoch % sample_interval) == 0:
#text_logs_path = self.raw_split_path + "attempt_{}_ep_{}_logs.txt".format(raw_split_attempt, epoch)
self.view_log_headings(epoch, epochs, epoch_interval, text_logs_path, raw_split_attempt)
self.view_epoch_losses(epoch_metrics_dict, text_logs_path)
self.view_gen_imgs(epoch, raw_split_attempt, raw_split_path, text_logs_path)
self.verify_collapsed_generators(epoch, text_logs_path, img_noise_scale, collapse_check_loss, collapse_check_epoch)
#flag for cancelling the training if generators collapse is detected
if self.cancel_training_flag:
break
#prints end of training logs
print_save_log("\n\n"+get_log_heading("END OF RAW SPLIT TRAINING FOR NODE {}".format(self.node_k.name)), text_logs_path)
print_save_log("End of training.", text_logs_path)
if not(self.cancel_training_flag):
#gets cluster assignment probabilities for each example with classifier's inference
clasf_cluster_probs = self.get_clasf_cluster_probs(self.dl_k, self.mgan_k.clasf, img_noise_scale)
# creates children with new clasf cluster probs
self.node_k.create_children(cluster_probs_left = clasf_cluster_probs[0], cluster_probs_right = clasf_cluster_probs[1])
#logs with the binary clustering result for current node's raw split
new_cluster_prob_mass_per_class = self.get_cluster_prob_mass_per_class(self.dl_k, clasf_cluster_probs)
self.view_new_clasf_clustering(new_cluster_prob_mass_per_class, text_logs_path)
def train_on_epoch(self, epoch_metrics_dict, img_noise_scale, batch_size_gen=100):
mgan_k = self.mgan_k
for batch_idx in range(self.no_batches):
#samples real images from groups l and m
imgs_real_k = self.get_real_images(img_noise_scale)
#Trains group l components with needed external data/components from group m
imgs_gen_k = self.get_gen_images(img_noise_scale, batch_size_gen)
batch_metrics_dict = mgan_k.train_on_batch(imgs_real = imgs_real_k,
imgs_gen = imgs_gen_k)
epoch_metrics_dict = sum_dicts(epoch_metrics_dict, batch_metrics_dict)
#updates amp scaler after training components from groups l and m
self.amp_scaler.update()
return epoch_metrics_dict
def _get_classes_for_monitoring(self, cluster_prob_mass_per_class, min_proportion=0.2):
clusters_per_class_sum = [np.sum(clusters)
for clusters in cluster_prob_mass_per_class]
classes = self.dl_k.dataset.classes
targets_per_example = self.dl_k.dataset.targets
examples_per_class_sum = [np.sum(np.array(targets_per_example)==i)
for i in range(len(classes))]
classes_for_monitoring = [i for (i, sum_value) in enumerate(clusters_per_class_sum)
if sum_value > examples_per_class_sum[i] * min_proportion]
if len(classes_for_monitoring) < 2:
print('\nClasses for metrics monitoring were set to {}, '
'which is too few (2 or more classes required)'.format(classes_for_monitoring))
            print('This means clusters are too small (prob. mass for classes < {:.0%} of original mass at root node).'.format(min_proportion))
print('Enabling all classes for metrics monitoring.')
classes_for_monitoring = np.arange(len(classes)).tolist()
return classes_for_monitoring
else:
return classes_for_monitoring
def get_real_images(self, img_noise_scale):
'''Gets real images from groups l and m'''
imgs_real_k = next(iter(self.dl_k))[0].type(self.Tensor)
imgs_real_k = self._add_noise(imgs_real_k, img_noise_scale)
return imgs_real_k
def get_clasf_cluster_probs(self, dataloader, classifier, img_noise_scale=0):
'''Performs cluster inference over the entire training set w/ the 1 classifier.'''
dataloader=torch.utils.data.DataLoader(dataloader.dataset, batch_size=100, shuffle=False, drop_last=False)
#empty sublists to accumulate the minibatches of probabilities
clasf_cluster_probs = [[] for _ in range(self.no_c_outputs)]
#iterates through the dataset to collect classifiers inference with minibatches
for (batch_imgs, batch_targets) in dataloader:
batch_imgs = batch_imgs.cuda()
batch_imgs = self._add_noise(batch_imgs, img_noise_scale)
with torch.no_grad():
clasf_cluster_probs_batch = torch.exp(classifier(batch_imgs)).transpose(1,0)
for i in range(self.no_c_outputs):
clasf_cluster_probs[i].append(clasf_cluster_probs_batch[i])
#concatenates results for each batch of the whole data
clasf_cluster_probs = np.array([torch.cat(batch).cpu().numpy() for batch in clasf_cluster_probs])
#gets parent node (k) probabilities by summing previous probabilities in l and m
current_cluster_probs = self.dl_k.sampler.weights.numpy()
#multiplies clasf inference by the current node`s probabilities
clasf_cluster_probs[0] *= current_cluster_probs
clasf_cluster_probs[1] *= current_cluster_probs
return clasf_cluster_probs.tolist()
def get_cluster_prob_mass_per_class(self, dataloader, cluster_probs_per_example):
no_of_clusters = 2
assert(len(cluster_probs_per_example) == no_of_clusters)
no_of_classes = len(dataloader.dataset.classes)
prob_mass_per_class = []
for i in range(no_of_classes):
prob_mass_ij = []
for j in range(no_of_clusters):
prob_mass_ij.append( ((np.array(dataloader.dataset.targets)==i)*cluster_probs_per_example[j]).sum().item() )
prob_mass_per_class.append(prob_mass_ij)
return np.round(prob_mass_per_class, 2)
def get_cluster_assignments_per_class(self, dataloader, cluster_probs_per_example):
no_of_clusters = 2
assert(len(cluster_probs_per_example) == no_of_clusters)
no_of_classes = len(dataloader.dataset.classes)
cluster_assignments_per_class = []
for i in range(no_of_classes):
cluster_counts_ij = []
for j in range(no_of_clusters):
cluster_counts_ij.append( ((np.array(dataloader.dataset.targets)==i)*(cluster_probs_per_example[j])>0.5).sum().item() )
cluster_assignments_per_class.append(cluster_counts_ij)
return cluster_assignments_per_class
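    # Worked example for the hard-count table above (made-up numbers): if three examples
    # of one class have probabilities [0.9, 0.4, 0.6] for cluster 0, only values > 0.5
    # count as hard assignments, so that class contributes 2 examples to cluster 0.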
def view_log_headings(self, epoch, epochs, epoch_interval, text_logs_path, raw_split_attempt=0):
'''Part 1/4 of training logs'''
log_headings = "[RAW SPLIT NODE %s] [EPOCH %d/%d] [EPOCH TIME INTERVAL: %.2f sec.] [ATTEMPT %d]"%(self.node_k.name, epoch, epochs, epoch_interval, raw_split_attempt)
log_headings = get_log_heading(log_headings)
print_save_log('\n\n' + log_headings, text_logs_path)
def view_epoch_losses(self, epoch_metrics_dict, text_logs_path):
'''Part 2/4 of training logs'''
log_string = 'Mean epoch losses/acc for each component in the MGAN: \n'
log_string += str({k:np.round(v/self.no_batches,5) for k,v in epoch_metrics_dict.items()}) + '\n'
print_save_log(log_string, text_logs_path)
def view_gen_imgs(self, epoch, raw_split_attempt, raw_split_path, text_logs_path):
'''Part 3/4 of training logs'''
imgs_plot = self.get_gen_images(img_noise_scale=0, batch_size=20)
if self.node_k is not None:
if raw_split_path is not None:
img_save_path = raw_split_path + "attempt_{}_ep_{}.jpg".format(raw_split_attempt, epoch)
self._plot_img_grid(imgs_plot, img_save_path, self.node_k.name, text_logs_path)
def verify_collapsed_generators(self, epoch, text_logs_path, img_noise_scale=0, collapse_check_loss=0.01, collapse_check_epoch=50, batch_size=100):
'''Part 4/4 of training logs'''
if epoch < collapse_check_epoch:
print_save_log("\nGenerator collapse will be checked after epoch {}".format(collapse_check_epoch), text_logs_path)
else:
imgs_gens_k = self.get_gen_images(img_noise_scale, batch_size)
losses = self.mgan_k.get_disc_losses_for_gen(imgs_gens_k)
for loss in losses:
if loss < collapse_check_loss and epoch>=collapse_check_epoch:
log_string = "\nDiscriminator loss for generated images is too low (<{}), indicating generators collapse. The training shall restart.".format(collapse_check_loss)
print_save_log(log_string, text_logs_path)
self.cancel_training_flag = True
break
if not(self.cancel_training_flag):
print_save_log("\nGenerator collapse check: no collapse detected, training shall continue.", text_logs_path)
else:
print_save_log("\nGenerator collapse check: collapse detected, restart training.", text_logs_path)
def view_new_clasf_clustering(self, new_cluster_prob_mass_per_class, text_logs_path):
'''Prints logs with binary clustering result for current node'''
#header
log_headings = "EXHIBITING BINARY CLUSTERING FOR NODE %s OBTAINED WITH CLASSIFIER'S INFERENCE"%(self.node_k.name)
log_headings = get_log_heading(log_headings)
print_save_log("\n\n"+log_headings, text_logs_path)
#clustering table
log_text_1 = 'Local binary soft clustering (prob. mass division) for node {}, according to each reference class\n'.format(self.node_k.name)
print_save_log(log_text_1, text_logs_path)
table_df = get_local_cluster_table(new_cluster_prob_mass_per_class, self.idx_to_class_dict, node=self.node_k,
table_name = 'Local soft clusters from binary split')
pd.set_option("max_colwidth", None)
pd.set_option('max_columns', None)
try:
display(table_df)
except:
print(table_df)
print_save_log(str(table_df), text_logs_path, print_log=False)
def _add_noise(self, tensor, normal_std_scale):
if (normal_std_scale > 0):
return tensor + (tensor*torch.randn_like(tensor)*normal_std_scale)
else:
return tensor
def get_gen_images(self, img_noise_scale, batch_size=100):
'''Generates imgs from each gan (already concatenated per gan)'''
latent_dim = self.mgan_k.latent_dim
z = self.Tensor(distribution_select(self.prior_distribution, (batch_size, latent_dim))).requires_grad_(False)
imgs_gen = self.mgan_k.get_gen_images(z, rand_perm=False)
imgs_gen = self._add_noise(imgs_gen, img_noise_scale)
return imgs_gen
def _plot_img_grid(self, imgs_plot, img_save_path, node_name, text_logs_path):
if imgs_plot.shape[1] == 3 or imgs_plot.shape[1] == 1:
grid = make_grid(imgs_plot.cpu(), nrow=20, normalize=True)
if img_save_path is not None:
save_image(grid, img_save_path)
try:
print_save_log("\nSample of generated images from raw split MGAN for node {} (each row for each generator' output):".format(node_name), text_logs_path)
print_save_log('(This sample is saved at {})'.format(img_save_path), text_logs_path)
display(Image(filename=img_save_path, width=900))
except:
print_save_log('Jupyter image display not available for plotting sample of generated images', text_logs_path)
#print_save_log("Sample of generated images (each row for each generator' output) saved at {}".format(img_save_path), text_logs_path)
else:
print_save_log("\nNo image save path defined, can't save sample of generated images", text_logs_path)
else:
print_save_log("\nCan't plot/save imgs with shape {}".format(imgs_plot.shape), text_logs_path)
def raw_split(args, dataloader_cluster_k, node_k, epochs, noise_start, sample_interval=10, collapse_check_loss=0.001):
restart_training = True
raw_split_attempt = 1
max_attempts = 4
while restart_training:
#configure log saving paths
raw_split_path = os.path.join(node_k.node_path, "raw_split/")
os.makedirs(raw_split_path, exist_ok=True)
text_logs_path = raw_split_path + "attempt_{}_training_logs.txt".format(raw_split_attempt)
save_log_text('', text_logs_path, open_mode='w')
#print main log headings
log_headings = get_log_heading("RAW SPLIT OF NODE {} (ATTEMPT {}) ".format(node_k.name, raw_split_attempt), spacing=2)
print_save_log('\n\n\n' + log_headings, text_logs_path)
#print parameters
log_headings = get_log_heading("TRAINING PARAMETERS")
print_save_log(log_headings, text_logs_path)
print_save_log("Training Arguments: ", text_logs_path)
print_save_log(vars(args), text_logs_path)
print_save_log("Training using device : {}".format(args.device), text_logs_path)
print_save_log("Training logs save path: {}".format(text_logs_path), text_logs_path)
print_save_log("Limit of Training Attempts: {}".format(max_attempts), text_logs_path)
#create MGAN models
[mgan_k] = create_gans(args, no_gans=1, no_g_paths=2)
#print models' architecture
log_headings = get_log_heading("MODELS ARCHITETURE")
print_save_log('\n\n' + log_headings, text_logs_path)
print_save_log("Discriminator Architecture:", text_logs_path)
print_save_log(mgan_k.disc, text_logs_path)
print_save_log("\nGernerator Architecture:", text_logs_path)
print_save_log(mgan_k.gen_set.paths[0], text_logs_path)
print_save_log("\nClassifier Architecture:", text_logs_path)
print_save_log(mgan_k.clasf, text_logs_path)
#create trainer object
trainer = MGANTrainer(dataloader_cluster_k,
mgan_k,
amp_enable=args.amp_enable,
prior_distribution = "uniform",
node_k = node_k
)
#train
trainer.train(epochs = epochs,
text_logs_path = text_logs_path,
raw_split_path = raw_split_path,
noise_start=noise_start,
sample_interval=sample_interval,
collapse_check_loss =collapse_check_loss,
collapse_check_epoch = args.collapse_check_epoch,
raw_split_attempt = raw_split_attempt,
)
#flag for restarting the training if generation collapse is detected
if trainer.cancel_training_flag == False:
restart_training = False
else:
raw_split_attempt += 1
if raw_split_attempt>max_attempts:
max_attempt_log_headings = get_log_heading("LIMIT OF {} FAILED ATTEMPTS REACHED".format(max_attempts))
max_attempt_log = "The training for the raw split of node {} reached the limit of {} failed attempts due generation collapse.".format(node_k, max_attempts)
max_attempt_log += " Please, select more stable tunnings for the models so that the generation stops collapsing."
print_save_log("\n\n" + max_attempt_log_headings, text_logs_path)
print_save_log(max_attempt_log, text_logs_path)
sys.exit(max_attempt_log)
return trainer
| 20,482 | 49.575309 | 182 | py |
HC-MGAN | HC-MGAN-main/utils/others.py | import os
import math
import torch
import torchvision.transforms as transforms
from torchvision.utils import save_image, make_grid
from torchvision import datasets
import torch
from models.gan import GAN
def sum_dicts(dict_a, dict_b):
assert(dict_a.keys() == dict_b.keys())
return {k:dict_a[k]+dict_b[k] for k,v in dict_a.items()}
def zero_dict_values(dict):
return {k:0 for k,v in dict.items()}
def save_log_text(log_text, save_path, open_mode = 'a'):
try:
with open(save_path, open_mode) as f:
f.write(log_text)
except FileNotFoundError:
print("Path {} for saving training logs does not exist".format(save_path))
def print_save_log(log, save_path, print_log=True):
if print_log:
print(log)
if save_path is not None:
save_log_text(remove_bold_from_string(str(log))+'\n', save_path, open_mode='a')
def get_log_heading(text, spacing=0):
hyphen_bar = (len(text)+2)*'-'
line_break = ('#\n')*spacing
return get_bold_string(hyphen_bar + '\n'+ line_break + '# ' + text + '\n'+ line_break + hyphen_bar)
def get_bold_string(string):
return "\033[1m" + string + "\033[0m"
def remove_bold_from_string(string):
string = string.replace('\033[1m', '')
string = string.replace('\033[0m', '')
return string
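# Example of the heading helpers (output rendered without the bold escape codes):
# get_log_heading("RAW SPLIT", spacing=1) produces
#
#   -----------
#   #
#   # RAW SPLIT
#   #
#   -----------
#
# and remove_bold_from_string strips the ANSI bold markers before the text is saved
# to the .txt training logs by print_save_log/save_log_text.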
def create_gans(args, no_gans=1, no_g_paths=2):
available_img_dim = [28, 32]
import models.models_general as mdg
if args.img_dim == 32:
import models.models_32x32 as md
elif args.img_dim == 28:
import models.models_28x28 as md
else:
raise ValueError('Data type {} not available, choose from {}'.format(args.data_type, available_img_dim))
def create_feature_layer():
return md.EncoderLayers(architecture=args.architecture_d,
nf = args.nf_d,
kernel_size=args.kernel_size_d,
norm=args.normalization_d,
nc=args.img_channels,
print_shapes=True)
gan_list = []
def create_gen():
return md.Generator(architecture = args.architecture_g,
nf = args.nf_g,
kernel_size = args.kernel_size_g,
latent_dim = args.latent_dim,
nc = args.img_channels,
norm = args.normalization_g,
print_shapes=True)
if args.shared_features_across_ref:
shared_feature_layers = create_feature_layer().cuda()
for i in range(no_gans):
gen = create_gen().cuda()
gens = [gen]
for i in range(no_g_paths-1):
gens.append(create_gen().cuda())
gen_set = mdg.GeneratorSet(*gens)
if args.shared_features_across_ref:
feature_layers = shared_feature_layers
else:
feature_layers = create_feature_layer().cuda()
disc = mdg.Discriminator(feature_layers).cuda()
clasf = mdg.Classifier(feature_layers, no_c_outputs=2).cuda()
#optimizers
optimizer_G = torch.optim.Adam(list(gen_set.parameters()), lr=args.lr_g, betas=(args.b1, args.b2))
optimizer_D = torch.optim.Adam(list(disc.parameters()), lr=args.lr_d, betas=(args.b1, args.b2))
optimizer_C = torch.optim.Adam(list(clasf.linear_clasf.parameters()), lr=args.lr_c, betas=(args.b1, args.b2))
gan = GAN(gen_set, disc, clasf, feature_layers, optimizer_G, optimizer_D, optimizer_C, args.diversity_parameter_g)
gan_list.append(gan)
return gan_list
'''def get_pretty_df(df):
dfStyler = df.style.set_properties(**{'text-align': 'center',
'border' : '1px solid !important' })
df = dfStyler.set_table_styles([dict(selector='th', props=[('text-align', 'center')])])
return df''' | 3,956 | 34.648649 | 122 | py |
HC-MGAN | HC-MGAN-main/utils/data.py | import os
import math
import torch
import torchvision.transforms as transforms
from torchvision.utils import save_image, make_grid
from torchvision import datasets
from torch.utils.data import Dataset
import torch
class MyDataset(Dataset):
def __init__(self, dataset):
self.dataset = dataset
self.targets = dataset.targets
def __getitem__(self, index):
data, target = self.dataset[index]
return data, target, index
def __len__(self):
return len(self.dataset)
def merge_dataloaders(dl1, dl2):
dl1.dataset.data = torch.cat([dl1.dataset.data, dl2.dataset.data])
dl1.dataset.targets = torch.cat([dl1.dataset.targets, dl2.dataset.targets])
dl1.sampler.weights = torch.cat([dl1.sampler.weights, dl2.sampler.weights])
return dl1
def create_dataloader(dataset, test = False, batch_size = 100, path='../data/'):
available_datasets = ['fmnist', 'mnist','sop']
if dataset not in available_datasets:
raise ValueError('Dataset {} not available, choose from {}'.format(dataset, available_datasets))
os.makedirs(path, exist_ok=True)
if dataset == 'fmnist' :
if test:
sampler = torch.utils.data.WeightedRandomSampler(weights=[1]*10000, num_samples=batch_size, replacement=True, generator=None)
else:
sampler = torch.utils.data.WeightedRandomSampler(weights=[1]*60000, num_samples=batch_size, replacement=True, generator=None)
dataloader = torch.utils.data.DataLoader(
datasets.FashionMNIST(path, train=not(test), download=True,
transform=transforms.Compose([
transforms.ToTensor(),
                                transforms.Normalize([.5], [.5])
])),
batch_size=batch_size, shuffle=False, drop_last=True, sampler=sampler)
dataloader.dataset.classes = ['tshirt', 'pants', 'pullov', 'dress', 'coat','sandal', 'shirt', 'sneak', 'bag', 'ank-bt']
elif dataset =='mnist':
if test:
sampler = torch.utils.data.WeightedRandomSampler(weights=[1]*10000, num_samples=batch_size, replacement=True, generator=None)
else:
sampler = torch.utils.data.WeightedRandomSampler(weights=[1]*60000, num_samples=batch_size, replacement=True, generator=None)
dataloader = torch.utils.data.DataLoader(
datasets.MNIST(path, train=not(test), download=True,
transform=transforms.Compose([
transforms.ToTensor(),
                                transforms.Normalize([.5], [.5])
])),
batch_size=batch_size, shuffle=False, drop_last=True, sampler=sampler)
dataloader.dataset.classes = ['zero', 'one', 'two', 'three', 'four', 'five','six', 'seven', 'eight', 'nine']
elif dataset =='sop':
sampler = torch.utils.data.WeightedRandomSampler(weights=[1], num_samples=batch_size, replacement=True, generator=None)
dataloader = torch.utils.data.DataLoader(
datasets.ImageFolder(os.path.join(path,'Stanford_Online_Products'),
transform=transforms.Compose([
transforms.Grayscale(),
transforms.ToTensor(),
                                transforms.Normalize([.5], [.5])
])),
batch_size=batch_size, shuffle=False, drop_last=True, sampler=sampler)
dataloader.sampler.weights = torch.Tensor([1]*len(dataloader.dataset))
return dataloader
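# Usage sketch (path and batch size are illustrative): the training dataloaders start
# with a WeightedRandomSampler whose weights are all ones; the tree code later overwrites
# dataloader.sampler.weights with per-node soft-cluster probabilities.
#
#   dl_train = create_dataloader('fmnist', test=False, batch_size=100, path='../data/')
#   dl_train.sampler.weights.sum()   # -> 60000. for the full FashionMNIST training set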
| 3,721 | 46.113924 | 137 | py |
HC-MGAN | HC-MGAN-main/utils/soft_cluster.py | import os
import math
import shutil
import time
import numpy as np
import scipy
import scipy.optimize as opt
import sklearn
from sklearn import metrics
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torchvision.utils import save_image, make_grid
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from tqdm import tqdm_notebook
from utils.others import get_log_heading, get_bold_string, save_log_text, remove_bold_from_string, print_save_log
CUDA = True if torch.cuda.is_available() else False
Tensor = torch.cuda.FloatTensor if CUDA else torch.FloatTensor
def view_global_tree_logs(dataloader_train, non_leaf_nodes, leaf_nodes, log_save_path, log_title = None, display_table=True):
if log_title is None:
log_title = '\nGLOBAL TREE LOGS AFTER LAST RAW SPLIT OR REFINEMENT'
log_heading = get_bold_string(get_log_heading(log_title, spacing=2))
print_save_log('\n\n\n'+log_heading, log_save_path)
no_classes = len(dataloader_train.dataset.classes)
#-----------------------
#LOGS PART 1: TREE NODES
#-----------------------
log_title = 'GLOBAL TREE LOGS 1/3: TREE NODES'
log_heading = get_bold_string(get_log_heading(log_title))
print_save_log(log_heading, log_save_path)
print_save_log('Non-leaf nodes:', log_save_path)
print_save_log(str([node.name + ' (' + node.status + ')' for node in non_leaf_nodes]), log_save_path)
print_save_log('\nLeaf nodes:', log_save_path)
print_save_log(str([node.name + ' (' + node.status + ')' for node in leaf_nodes]), log_save_path)
#-------------------------------
#LOGS PART 2: CLUSTERING MATRIX
#-------------------------------
log_title = 'GLOBAL TREE LOGS 2/3: CLUSTERING MATRIX'
log_heading = get_bold_string(get_log_heading(log_title))
print_save_log('\n\n'+log_heading, log_save_path)
print_save_log("This table indicates the clustering matrix when it reaches N clusters, or N leaf nodes.", log_save_path)
print_save_log("The final matrix occurs when N equals the number of classes.\n", log_save_path)
print("(This table is saved at {})".format(log_save_path))
leaf_nodes_probs = []
for node in leaf_nodes:
cluster_probs = node.cluster_probs
leaf_nodes_probs.append(cluster_probs[-1].numpy())
leaf_nodes_probs = np.array(leaf_nodes_probs)
cluster_counts_per_class = get_hard_cluster_per_class_parallel(dataloader_train, leaf_nodes_probs, no_of_classes=no_classes)
cluster_matrix = np.array(cluster_counts_per_class)
cluster_matrix_dict = {}
classes_names_dict = {v:k for k,v in dataloader_train.dataset.class_to_idx.items()}
classes_names = [classes_names_dict[t] for t in range(no_classes)]
cluster_matrix_dict['Leaf Nodes Clusters'] = [node.name + ' (' + node.status + ')' for node in leaf_nodes]
for i in range(len(cluster_matrix)):
column_name = classes_names[i] + '({})'.format(np.sum(cluster_matrix[i]).round(2))
column_contents = cluster_matrix[i]
cluster_matrix_dict[column_name] = column_contents
if display_table:
pd.set_option("max_colwidth", None)
pd.set_option('max_columns', None)
try:
display(pd.DataFrame(cluster_matrix_dict))
print_save_log(str(pd.DataFrame(cluster_matrix_dict)), log_save_path, print_log=False)
except:
print_save_log(str(pd.DataFrame(cluster_matrix_dict)), log_save_path)
#-------------------------------
#LOGS PART 3: CLUSTERING METRICS
#-------------------------------
log_title = 'GLOBAL TREE LOGS 3/3: CLUSTERING METRICS'
log_heading = get_bold_string(get_log_heading(log_title))
print_save_log('\n\n'+log_heading, log_save_path)
#NMI
nmi = get_parallel_clustering_nmi(cluster_counts_per_class)
print_save_log("Normalized Mutual Information (NMI): {}".format(nmi), log_save_path)
#max 1 class ACC
classes_per_cluster, classes_counts_per_cluster = get_opt_assignment(1, cluster_counts_per_class)
total_counts = np.sum([np.sum(classes_counts) for classes_counts in classes_counts_per_cluster])
total_data_examples= np.sum(cluster_counts_per_class)
acc = total_counts/total_data_examples
print_save_log('\nBest accuracy (ACC) with at most 1 (one) class per cluster: {}/{} = {}'.format(total_counts, total_data_examples, acc), log_save_path)
opt_assign_string = 'Optimum assignment considered: \n'
opt_assign_string += get_opt_assignment_str(classes_per_cluster, classes_counts_per_cluster, classes_names_dict, leaf_nodes)
print_save_log(opt_assign_string, log_save_path)
#ACC
classes_per_cluster_best = []
classes_counts_per_cluster_best = []
total_counts_best = 0
for max_classes_per_cluster in range(1, no_classes+1):
classes_per_cluster, classes_counts_per_cluster = get_opt_assignment(max_classes_per_cluster, cluster_counts_per_class)
total_counts = np.sum([np.sum(classes_counts) for classes_counts in classes_counts_per_cluster])
if total_counts>total_counts_best:
classes_per_cluster_best = classes_per_cluster
classes_counts_per_cluster_best = classes_counts_per_cluster
total_counts_best = total_counts
acc = total_counts_best/total_data_examples
print_save_log('\nBest accuracy (ACC) with multiple classes per cluster: {}/{} = {}'.format(total_counts_best, total_data_examples, acc), log_save_path)
opt_assign_string = 'Optimum assignment considered: \n'
opt_assign_string += get_opt_assignment_str(classes_per_cluster_best, classes_counts_per_cluster_best, classes_names_dict, leaf_nodes)
print_save_log(opt_assign_string, log_save_path)
print_save_log("\n(Note on the above ACC metrics: if the no. of classes is less then the no. clusters, " +
"we can either consider multiple classes belonging to a single cluster or left certain classes unassigned for computing ACC. " +
"The first ACC metric above considers at most 1 classes per cluster, and when the number of clusters and classes are equal, it provides the "+
"usual ACC metric used in horizontal clustering and also used in our paper as benchmark." +
"The second ACC metric considers the best assignment possible with multiple classes allowed to be assigned to each cluster, " +
"and its useful to track an upper bound for the final 1-to-1 ACC during the growth of the tree, before it reaches one cluster to each class.", log_save_path)
def get_opt_assignment(max_classes_per_cluster, cluster_counts_per_class):
"""Gets optimum cluster assignment with hungarian algorithm, returning classes assignments and classes counts per cluster.
For enabling multiple classes per cluster, the clustering matrix needs to have its cluster idx (columns) replicated n times,
where n is the maximum number of classes allowed for each cluster.
Args:
        max_classes_per_cluster (int): maximum number of classes allowed for each cluster during the search for the optimum assignment
cluster_counts_per_class (int list): clustering matrix with axis 0 relating to classes and axis 1 to clusters
"""
#cluster matrix is repeated N times to allow max N classes per cluster
mat = np.repeat(cluster_counts_per_class, max_classes_per_cluster, axis=1)
#gets optimum assignment idxs and example counts
lines, columns = scipy.optimize.linear_sum_assignment(mat, maximize=True)
opt_assign_counts_per_cluster = np.array(mat)[lines, columns]
#columns idxs refer to the N times repeated columns.
#to get cluster idxs, we need the integer division of the repeated idxs by their repetition number
columns_as_cluster_idx = columns//max_classes_per_cluster
#for loop for getting class idxs and class counts for each cluster i
classes_per_cluster = []
classes_counts_per_cluster = []
no_clusters = len(cluster_counts_per_class[0])
for i in range(no_clusters):
classes_per_cluster.append(lines[columns_as_cluster_idx==i])
classes_counts_per_cluster.append(opt_assign_counts_per_cluster[columns_as_cluster_idx==i])
return classes_per_cluster, classes_counts_per_cluster
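# Illustrative sketch (not part of the original code): a toy run of the column-replication
# trick documented above. The matrix values and the _demo_* name are assumptions for demonstration only.
def _demo_opt_assignment():
    toy_counts = [[90, 5, 5],    # class 0 falls mostly into cluster 0
                  [10, 80, 10],  # class 1 falls mostly into cluster 1
                  [5, 15, 80],   # class 2 falls mostly into cluster 2
                  [40, 5, 55]]   # class 3 is split between clusters 0 and 2
    # With max_classes_per_cluster=2 every cluster column is repeated twice, so the
    # Hungarian solver may assign up to two classes to the same cluster.
    classes_per_cluster, counts_per_cluster = get_opt_assignment(2, toy_counts)
    for cluster_idx, (classes, counts) in enumerate(zip(classes_per_cluster, counts_per_cluster)):
        print('cluster', cluster_idx, '-> classes', classes, 'counts', counts)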
def get_opt_assignment_str(classes_per_cluster, classes_counts_per_cluster, classes_names_dict, leaf_nodes):
no_clusters = len(classes_per_cluster)
opt_assign_string = ''
for i in range(no_clusters):
opt_assign_string += '['
opt_assign_string += ",".join(["'"+classes_names_dict[c]+"'({})".format(c_counts)
for c,c_counts in zip(classes_per_cluster[i], classes_counts_per_cluster[i])])
opt_assign_string += ']'
opt_assign_string += " --> '{}'; ".format(leaf_nodes[i].name)
return opt_assign_string
def get_hard_cluster_per_class_parallel(dataloader, split_cluster_probs, no_of_classes = 10, filter_classes=[]):
max_mask = (split_cluster_probs.max(axis=0,keepdims=1) == split_cluster_probs)
#print(max_mask[0])
no_of_clusters = len(split_cluster_probs)
cluster_counts_per_class = []
cluster_probs_sum = split_cluster_probs[0] + split_cluster_probs[1]
for i in range(no_of_classes):
cluster_counts_ij = []
if i not in filter_classes:
for j in range(no_of_clusters):
#print(j)
cluster_counts_ij.append( (((np.array(dataloader.dataset.targets)==i))*np.array(max_mask[j]) ).sum().item() )
cluster_counts_per_class.append(cluster_counts_ij)
classes_names_dict = {v:k for k,v in dataloader.dataset.class_to_idx.items()}
#print(np.array(cluster_counts_per_class))
return cluster_counts_per_class
def get_parallel_clustering_nmi(cluster_counts_per_class):
reference_labels = []
for i in range(len(cluster_counts_per_class)):
reference_labels += [i]*np.array(cluster_counts_per_class[i]).sum()
clustering_labels = []
for i in range(len(cluster_counts_per_class)):
for j in range(len(cluster_counts_per_class[0])):
clustering_labels += [j]*cluster_counts_per_class[i][j]
#print(len(reference_labels))
#print(len(clustering_labels))
nmi = sklearn.metrics.cluster.normalized_mutual_info_score(reference_labels, clustering_labels)
return nmi
def show(img, rows):
npimg = img.detach().numpy()
plt.figure(figsize = (20, rows))
plt.imshow(np.transpose(npimg, (1,2,0)), interpolation='nearest')
plt.axis('off')
plt.show()
def distribution_select(dist, shape):
assert(dist in ['uniform', 'normal'])
if dist=='uniform':
return np.random.uniform(-1, 1, shape)
elif dist=='normal':
return np.random.normal(0, 1, shape)
else:
return None
def get_local_cluster_table(clusters_per_class, classes_names_dict, node, table_name = 'Local binary clustering'):
no_of_classes = len(clusters_per_class)
classes_names = [classes_names_dict[c] for c in range(no_of_classes)]
table_dict = {}
left = node.child_left
right = node.child_right
table_dict[table_name] = ["Left cluster: " + left.name + "({})".format(left.status), "Right cluster: " + right.name + "({})".format(right.status)]
for i in range(no_of_classes):
column_name = classes_names[i] + '({})'.format(np.sum(clusters_per_class[i]).round(2))
column_contents = clusters_per_class[i]
table_dict[column_name] = column_contents
return pd.DataFrame(table_dict)
def get_classification_table_variation(clusters_per_class_orig, clusters_per_class_new, classes_names_dict, node, data_prefix = '', table_name='Clustering result'):
no_of_clusters = len(clusters_per_class_orig[0])
no_of_classes = len(clusters_per_class_orig)
classes_names = [classes_names_dict[t] for t in range(no_of_classes)]
table_dict = {}
left = node.child_left
right = node.child_right
ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(n//10%10!=1)*(n%10<4)*n%10::4])
table_dict[table_name] = ["Left cluster: " + left.name + "({})".format(left.status),
"Right cluster: " + right.name + "({})".format(right.status)]
clusters_per_class_diff = np.array(clusters_per_class_new) - np.array(clusters_per_class_orig)
clusters_per_class_diff = clusters_per_class_diff.round(2)
for i in range(no_of_classes):
column_name = data_prefix + classes_names[i] + '({})'.format(np.sum(clusters_per_class_new[i]).round(2))
column_contents_new = clusters_per_class_new[i]
column_contents_diff = clusters_per_class_diff[i]
column_formatted = ['{} (+{})'.format(column_contents_new[j], column_contents_diff[j]) if column_contents_diff[j]>=0
else '{} ({})'.format(column_contents_new[j], column_contents_diff[j]) for j in range(len(clusters_per_class_new[i])) ]
table_dict[column_name] = column_formatted
return(pd.DataFrame(table_dict))
| 13,406 | 46.042105 | 176 | py |
DKVMN | DKVMN-main/evaluation/run.py | """
Usage:
run.py [options]
Options:
--length=<int> max length of question sequence [default: 50]
--questions=<int> num of question [default: 100]
--lr=<float> learning rate [default: 0.001]
--bs=<int> batch size [default: 64]
--seed=<int> random seed [default: 59]
--epochs=<int> number of epochs [default: 30]
--cuda=<int> use GPU id [default: 0]
--final_fc_dim=<int> dimension of final dim [default: 10]
--question_dim=<int> dimension of question dim[default: 50]
--question_and_answer_dim=<int> dimension of question and answer dim [default: 100]
--memory_size=<int> memory size [default: 20]
--model=<string> model type [default: DKVMN]
"""
import os
import random
import logging
import torch
import torch.optim as optim
import numpy as np
from datetime import datetime
from docopt import docopt
from data.dataloader import getDataLoader
from evaluation import eval
def setup_seed(seed=0):
random.seed(seed)
np.random.seed(seed)
torch.random.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def main():
args = docopt(__doc__)
length = int(args['--length'])
questions = int(args['--questions'])
lr = float(args['--lr'])
bs = int(args['--bs'])
seed = int(args['--seed'])
epochs = int(args['--epochs'])
cuda = args['--cuda']
final_fc_dim = int(args['--final_fc_dim'])
question_dim = int(args['--question_dim'])
question_and_answer_dim = int(args['--question_and_answer_dim'])
memory_size = int(args['--memory_size'])
model_type = args['--model']
logger = logging.getLogger('main')
logger.setLevel(level=logging.DEBUG)
date = datetime.now()
handler = logging.FileHandler(
f'log/{date.year}_{date.month}_{date.day}_{model_type}_result.log')
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info('DKVMN')
logger.info(list(args.items()))
setup_seed(seed)
if torch.cuda.is_available():
os.environ["CUDA_VISIBLE_DEVICES"] = cuda
device = torch.device('cuda')
else:
device = torch.device('cpu')
trainLoader, validationLoader, testLoader = getDataLoader(bs, questions, length)
from model.model import MODEL
model = MODEL(n_question=questions, batch_size=bs, q_embed_dim=question_dim, qa_embed_dim=question_and_answer_dim,
memory_size=memory_size, final_fc_dim=final_fc_dim)
model.init_params()
model.init_embeddings()
optimizer = optim.Adam(model.parameters(), lr=lr)
best_auc = 0
for epoch in range(epochs):
print('epoch: ' + str(epoch+1))
model, optimizer = eval.train_epoch(model, trainLoader, optimizer, device)
logger.info(f'epoch {epoch+1}')
auc = eval.test_epoch(model, validationLoader, device)
if auc > best_auc:
print('best checkpoint')
torch.save({'state_dict': model.state_dict()}, 'checkpoint/'+model_type+'.pth.tar')
best_auc = auc
eval.test_epoch(model, testLoader, device, ckpt='checkpoint/'+model_type+'.pth.tar')
if __name__ == '__main__':
main()
| 3,566 | 34.67 | 118 | py |
DKVMN | DKVMN-main/evaluation/eval.py | import tqdm
import torch
import logging
import os
from sklearn import metrics
logger = logging.getLogger('main.eval')
def __load_model__(ckpt):
'''
ckpt: Path of the checkpoint
return: Checkpoint dict
'''
if os.path.isfile(ckpt):
checkpoint = torch.load(ckpt)
print("Successfully loaded checkpoint '%s'" % ckpt)
return checkpoint
else:
raise Exception("No checkpoint found at '%s'" % ckpt)
def train_epoch(model, trainLoader, optimizer, device):
model.to(device)
for batch in tqdm.tqdm(trainLoader, desc='Training: ', mininterval=2):
batch = batch.to(device)
datas = torch.chunk(batch, 3, 2)
optimizer.zero_grad()
loss, prediction, ground_truth = model(datas[0].squeeze(2), datas[1].squeeze(2), datas[2])
loss.backward()
optimizer.step()
return model, optimizer
def test_epoch(model, testLoader, device, ckpt=None):
model.to(device)
if ckpt is not None:
checkpoint = __load_model__(ckpt)
model.load_state_dict(checkpoint['state_dict'])
ground_truth = torch.tensor([], device=device)
prediction = torch.tensor([], device=device)
for batch in tqdm.tqdm(testLoader, desc='Testing: ', mininterval=2):
batch = batch.to(device)
datas = torch.chunk(batch, 3, 2)
loss, p, label = model(datas[0].squeeze(2), datas[1].squeeze(2), datas[2])
prediction = torch.cat([prediction, p])
ground_truth = torch.cat([ground_truth, label])
acc = metrics.accuracy_score(torch.round(ground_truth).detach().cpu().numpy(), torch.round(prediction).detach().cpu().numpy())
auc = metrics.roc_auc_score(ground_truth.detach().cpu().numpy(), prediction.detach().cpu().numpy())
logger.info('auc: ' + str(auc) + ' acc: ' + str(acc))
print('auc: ' + str(auc) + ' acc: ' + str(acc))
return auc
| 1,882 | 35.921569 | 130 | py |
DKVMN | DKVMN-main/evaluation/__init__.py | 0 | 0 | 0 | py |
|
DKVMN | DKVMN-main/evaluation/checkpoint/__init__.py | 0 | 0 | 0 | py |
|
DKVMN | DKVMN-main/evaluation/log/__init__.py | 0 | 0 | 0 | py |
|
DKVMN | DKVMN-main/data/readdata.py | import numpy as np
import itertools
from sklearn.model_selection import KFold
class DataReader():
def __init__(self, train_path, test_path, maxstep, num_ques):
self.train_path = train_path
self.test_path = test_path
self.maxstep = maxstep
self.num_ques = num_ques
def getData(self, file_path):
datas = []
with open(file_path, 'r') as file:
            for length, ques, ans in itertools.zip_longest(*[file] * 3):
                length = int(length.strip().strip(','))
                ques = [int(q) for q in ques.strip().strip(',').split(',')]
                ans = [int(a) for a in ans.strip().strip(',').split(',')]
                slices = length // self.maxstep + (1 if length % self.maxstep > 0 else 0)
                for i in range(slices):
                    data = np.zeros(shape=[self.maxstep, 3])  # column 0 -> question id (shifted by +1, 0 is padding)
                    if length > 0:                            # column 1 -> question-and-answer id (shifted by +1)
                        if length >= self.maxstep:            # column 2 -> label (answer 0 -> 1, answer 1 -> 2)
                            steps = self.maxstep
                        else:
                            steps = length
                        for j in range(steps):
                            data[j][0] = ques[i * self.maxstep + j] + 1
                            data[j][2] = ans[i * self.maxstep + j] + 1
                            if ans[i * self.maxstep + j] == 1:
                                data[j][1] = ques[i * self.maxstep + j] + 1
                            else:
                                data[j][1] = ques[i * self.maxstep + j] + self.num_ques + 1
                        length = length - self.maxstep
                    datas.append(data.tolist())
print('done: ' + str(np.array(datas).shape))
return datas
def getTrainData(self):
print('loading train data...')
kf = KFold(n_splits=5, shuffle=True, random_state=3)
Data = np.array(self.getData(self.train_path))
for train_indexes, vali_indexes in kf.split(Data):
valiData = Data[vali_indexes].tolist()
trainData = Data[train_indexes].tolist()
return np.array(trainData), np.array(valiData)
def getTestData(self):
print('loading test data...')
testData = self.getData(self.test_path)
return np.array(testData)
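# Minimal usage sketch (assumption: each student record in the dataset file is three
# comma-separated lines -- sequence length, question ids, 0/1 answers); not part of the original repo.
if __name__ == '__main__':
    import tempfile
    record = '4\n1,3,2,3\n0,1,1,0\n'
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
        tmp.write(record)
        path = tmp.name
    reader = DataReader(path, path, maxstep=5, num_ques=10)
    sample = reader.getData(path)[0]
    # Each row is [question id, question-and-answer id, label]; ids are shifted by +1 so 0 can act as padding.
    print(sample[:4])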
| 2,351 | 43.377358 | 92 | py |
DKVMN | DKVMN-main/data/dataloader.py | import torch
import torch.utils.data as Data
from .readdata import DataReader
#assist2015/assist2015_train.txt assist2015/assist2015_test.txt
#assist2017/assist2017_train.txt assist2017/assist2017_test.txt
#assist2009/builder_train.csv assist2009/builder_test.csv
def getDataLoader(batch_size, num_of_questions, max_step):
handle = DataReader('../dataset/assist2015/assist2015_train.txt',
'../dataset/assist2015/assist2015_test.txt', max_step,
num_of_questions)
train, vali = handle.getTrainData()
dtrain = torch.tensor(train.astype(int).tolist(), dtype=torch.long)
dvali = torch.tensor(vali.astype(int).tolist(), dtype=torch.long)
dtest = torch.tensor(handle.getTestData().astype(int).tolist(),
dtype=torch.long)
trainLoader = Data.DataLoader(dtrain, batch_size=batch_size, shuffle=True)
valiLoader = Data.DataLoader(dvali, batch_size=batch_size, shuffle=True)
testLoader = Data.DataLoader(dtest, batch_size=batch_size, shuffle=False)
return trainLoader, valiLoader, testLoader | 1,089 | 50.904762 | 78 | py |
DKVMN | DKVMN-main/data/__init__.py | 0 | 0 | 0 | py |
|
DKVMN | DKVMN-main/model/memory.py | import torch
from torch import nn
class DKVMNHeadGroup(nn.Module):
def __init__(self, memory_size, memory_state_dim, is_write):
super(DKVMNHeadGroup, self).__init__()
""""
Parameters
memory_size: scalar
memory_state_dim: scalar
is_write: boolean
"""
self.memory_size = memory_size
self.memory_state_dim = memory_state_dim
self.is_write = is_write
if self.is_write:
self.erase = torch.nn.Linear(self.memory_state_dim, self.memory_state_dim, bias=True)
self.add = torch.nn.Linear(self.memory_state_dim, self.memory_state_dim, bias=True)
nn.init.kaiming_normal_(self.erase.weight)
nn.init.kaiming_normal_(self.add.weight)
nn.init.constant_(self.erase.bias, 0)
nn.init.constant_(self.add.bias, 0)
def addressing(self, control_input, memory):
"""
Parameters
control_input: Shape (batch_size, control_state_dim)
memory: Shape (memory_size, memory_state_dim)
Returns
correlation_weight: Shape (batch_size, memory_size)
"""
similarity_score = torch.matmul(control_input, torch.t(memory))
correlation_weight = torch.nn.functional.softmax(similarity_score, dim=1) # Shape: (batch_size, memory_size)
return correlation_weight
def read(self, memory, control_input=None, read_weight=None):
"""
Parameters
control_input: Shape (batch_size, control_state_dim)
memory: Shape (batch_size, memory_size, memory_state_dim)
read_weight: Shape (batch_size, memory_size)
Returns
read_content: Shape (batch_size, memory_state_dim)
"""
if read_weight is None:
read_weight = self.addressing(control_input=control_input, memory=memory)
read_weight = read_weight.view(-1, 1)
memory = memory.view(-1, self.memory_state_dim)
rc = torch.mul(read_weight, memory)
read_content = rc.view(-1, self.memory_size, self.memory_state_dim)
read_content = torch.sum(read_content, dim=1)
return read_content
def write(self, control_input, memory, write_weight):
"""
Parameters
control_input: Shape (batch_size, control_state_dim)
write_weight: Shape (batch_size, memory_size)
memory: Shape (batch_size, memory_size, memory_state_dim)
Returns
new_memory: Shape (batch_size, memory_size, memory_state_dim)
"""
assert self.is_write
erase_signal = torch.sigmoid(self.erase(control_input))
add_signal = torch.tanh(self.add(control_input))
erase_reshape = erase_signal.view(-1, 1, self.memory_state_dim)
add_reshape = add_signal.view(-1, 1, self.memory_state_dim)
write_weight_reshape = write_weight.view(-1, self.memory_size, 1)
erase_mult = torch.mul(erase_reshape, write_weight_reshape)
add_mul = torch.mul(add_reshape, write_weight_reshape)
new_memory = memory * (1 - erase_mult) + add_mul
return new_memory
class DKVMN(nn.Module):
def __init__(self, memory_size, memory_key_state_dim, memory_value_state_dim, init_memory_key):
super(DKVMN, self).__init__()
"""
:param memory_size: scalar
:param memory_key_state_dim: scalar
:param memory_value_state_dim: scalar
:param init_memory_key: Shape (memory_size, memory_value_state_dim)
:param init_memory_value: Shape (batch_size, memory_size, memory_value_state_dim)
"""
self.memory_size = memory_size
self.memory_key_state_dim = memory_key_state_dim
self.memory_value_state_dim = memory_value_state_dim
self.key_head = DKVMNHeadGroup(memory_size=self.memory_size,
memory_state_dim=self.memory_key_state_dim,
is_write=False)
self.value_head = DKVMNHeadGroup(memory_size=self.memory_size,
memory_state_dim=self.memory_value_state_dim,
is_write=True)
self.memory_key = init_memory_key
self.memory_value = None
def init_value_memory(self, memory_value):
self.memory_value = memory_value
def attention(self, control_input):
correlation_weight = self.key_head.addressing(control_input=control_input, memory=self.memory_key)
return correlation_weight
def read(self, read_weight):
read_content = self.value_head.read(memory=self.memory_value, read_weight=read_weight)
return read_content
def write(self, write_weight, control_input):
memory_value = self.value_head.write(control_input=control_input,
memory=self.memory_value,
write_weight=write_weight)
self.memory_value = nn.Parameter(memory_value.data)
return self.memory_value
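# Illustrative round-trip (not part of the original repo); all sizes below are arbitrary
# assumptions, and the tensor shapes follow the docstrings above.
if __name__ == '__main__':
    batch, mem_size, key_dim, value_dim = 4, 5, 8, 16
    init_key = torch.randn(mem_size, key_dim)
    dkvmn = DKVMN(memory_size=mem_size, memory_key_state_dim=key_dim,
                  memory_value_state_dim=value_dim, init_memory_key=init_key)
    dkvmn.init_value_memory(torch.randn(batch, mem_size, value_dim))
    q_embed = torch.randn(batch, key_dim)      # question embedding (key space)
    qa_embed = torch.randn(batch, value_dim)   # question-answer embedding (value space)
    w = dkvmn.attention(q_embed)               # (batch, memory_size)
    read_content = dkvmn.read(w)               # (batch, memory_value_state_dim)
    dkvmn.write(w, qa_embed)                   # erase/add update of the value memory
    print(w.shape, read_content.shape, dkvmn.memory_value.shape)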
| 5,209 | 41.704918 | 117 | py |
DKVMN | DKVMN-main/model/model.py | import torch
import torch.nn as nn
from model.memory import DKVMN
class MODEL(nn.Module):
def __init__(self, n_question, batch_size, q_embed_dim, qa_embed_dim, memory_size, final_fc_dim):
super(MODEL, self).__init__()
self.n_question = n_question
self.batch_size = batch_size
self.q_embed_dim = q_embed_dim
self.qa_embed_dim = qa_embed_dim
self.memory_size = memory_size
self.memory_key_state_dim = q_embed_dim
self.memory_value_state_dim = qa_embed_dim
self.final_fc_dim = final_fc_dim
self.read_embed_linear = nn.Linear(self.memory_value_state_dim + self.memory_key_state_dim, self.final_fc_dim, bias=True)
self.predict_linear = nn.Linear(self.final_fc_dim, 1, bias=True)
self.init_memory_key = nn.Parameter(torch.randn(self.memory_size, self.memory_key_state_dim))
nn.init.kaiming_normal_(self.init_memory_key)
self.init_memory_value = nn.Parameter(torch.randn(self.memory_size, self.memory_value_state_dim))
nn.init.kaiming_normal_(self.init_memory_value)
self.mem = DKVMN(memory_size=self.memory_size,
memory_key_state_dim=self.memory_key_state_dim,
memory_value_state_dim=self.memory_value_state_dim, init_memory_key=self.init_memory_key)
self.q_embed = nn.Embedding(self.n_question + 1, self.q_embed_dim, padding_idx=0)
self.qa_embed = nn.Embedding(2 * self.n_question + 1, self.qa_embed_dim, padding_idx=0)
def init_params(self):
nn.init.kaiming_normal_(self.predict_linear.weight)
nn.init.kaiming_normal_(self.read_embed_linear.weight)
nn.init.constant_(self.read_embed_linear.bias, 0)
nn.init.constant_(self.predict_linear.bias, 0)
def init_embeddings(self):
nn.init.kaiming_normal_(self.q_embed.weight)
nn.init.kaiming_normal_(self.qa_embed.weight)
def forward(self, q_data, qa_data, target):
batch_size = q_data.shape[0]
seqlen = q_data.shape[1]
q_embed_data = self.q_embed(q_data)
qa_embed_data = self.qa_embed(qa_data)
memory_value = nn.Parameter(torch.cat([self.init_memory_value.unsqueeze(0) for _ in range(batch_size)], 0).data)
self.mem.init_value_memory(memory_value)
slice_q_embed_data = torch.chunk(q_embed_data, seqlen, 1)
slice_qa_embed_data = torch.chunk(qa_embed_data, seqlen, 1)
value_read_content_l = []
input_embed_l = []
for i in range(seqlen):
# Attention
q = slice_q_embed_data[i].squeeze(1)
correlation_weight = self.mem.attention(q)
# Read Process
read_content = self.mem.read(correlation_weight)
value_read_content_l.append(read_content)
input_embed_l.append(q)
# Write Process
qa = slice_qa_embed_data[i].squeeze(1)
self.mem.write(correlation_weight, qa)
all_read_value_content = torch.cat([value_read_content_l[i].unsqueeze(1) for i in range(seqlen)], 1)
input_embed_content = torch.cat([input_embed_l[i].unsqueeze(1) for i in range(seqlen)], 1)
predict_input = torch.cat([all_read_value_content, input_embed_content], 2)
read_content_embed = torch.tanh(self.read_embed_linear(predict_input.view(batch_size * seqlen, -1)))
pred = self.predict_linear(read_content_embed)
target_1d = target.view(-1, 1) # [batch_size * seq_len, 1]
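        # readdata.py stores labels shifted by +1 (padding = 0, answers = 1 or 2), so ge(1) masks out padded steps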
mask = target_1d.ge(1) # [batch_size * seq_len, 1]
pred_1d = pred.view(-1, 1) # [batch_size * seq_len, 1]
filtered_pred = torch.masked_select(pred_1d, mask)
filtered_target = torch.masked_select(target_1d, mask) - 1
loss = torch.nn.functional.binary_cross_entropy_with_logits(filtered_pred, filtered_target.float())
return loss, torch.sigmoid(filtered_pred), filtered_target.float()
| 3,932 | 44.206897 | 129 | py |
DKVMN | DKVMN-main/model/__init__.py | 0 | 0 | 0 | py |
|
probabilistic-ensemble | probabilistic-ensemble-main/noise_mnist_utils.py | import tensorflow as tf
import tensorflow_probability as tfp
import os
import numpy as np
tfd = tfp.distributions
def normal_parse_params(params, min_sigma=0.0):
"""
    Split the input along the last axis into two halves, representing the mean and the std.
    min_sigma is a lower bound on sigma.
"""
n = params.shape[0]
d = params.shape[-1] # channel
    mu = params[..., :d // 2]   # the last-dimension channels are split into two halves: mean and std
sigma_params = params[..., d // 2:]
sigma = tf.math.softplus(sigma_params)
sigma = tf.clip_by_value(t=sigma, clip_value_min=min_sigma, clip_value_max=1e5)
    distr = tfd.Normal(loc=mu, scale=sigma)     # Gaussian
    # e.g. if the proposal network outputs (None, None, 256), then mu.shape = (None, None, 128) and sigma.shape = (None, None, 128)
return distr
def rec_log_prob(rec_params, s_next, min_sigma=1e-2):
    # distr.mean.shape = distr.std.shape = (None, 28, 28, 1); the first half of the params is the mean, the second half the std.
distr = normal_parse_params(rec_params, min_sigma)
log_prob = distr.log_prob(s_next) # (None, 28, 28, 1)
assert len(log_prob.get_shape().as_list()) == 4
return tf.reduce_sum(log_prob, axis=[1,2])
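# Shape sanity check (illustrative sketch, not part of the original code): the decoder is assumed
# to emit 2 channels per target channel, which normal_parse_params splits into mean and std.
def _demo_rec_log_prob():
    rec_params = tf.random.normal((4, 28, 28, 2))   # (batch, H, W, 2 * target channels)
    s_next = tf.random.uniform((4, 28, 28, 1))      # reconstruction target
    return rec_log_prob(rec_params, s_next)         # summed over H and W -> shape (4, 1)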
| 1,087 | 33 | 91 | py |
probabilistic-ensemble | probabilistic-ensemble-main/baseline_train.py | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_probability as tfp
from ensemble_model import BaselineModel
from noise_mnist_utils import normal_parse_params, rec_log_prob
import pickle
tfd = tfp.distributions
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
tf.enable_eager_execution(config=config)
class MnistBaseline(tf.keras.Model):
def __init__(self, ensemble_num=3):
super().__init__()
self.ensemble_num = ensemble_num
self.baseline_model = [BaselineModel() for _ in range(ensemble_num)]
def ensemble_loss(self, obs, out_obs):
total_loss = []
for idx in range(self.ensemble_num):
single_loss = self.single_loss(tf.convert_to_tensor(obs), tf.convert_to_tensor(out_obs), idx)
total_loss.append(single_loss * np.random.random())
return total_loss
# return tf.convert_to_tensor(total_loss)
def single_loss(self, obs, out_obs, i=0):
""" 输出 variational lower bound, 训练目标是最大化该值. 输出维度 (batch,)
"""
rec_params = self.baseline_model[i](obs)
rec_loss = -1.0 * rec_log_prob(rec_params=rec_params, s_next=out_obs)
loss = tf.reduce_mean(rec_loss)
return loss
def build_dataset(train_images, train_labels, storage0=5, storage1=10):
image_dict = {}
# dict of image and label
for idx in range(len(train_labels)):
        label = train_labels[idx]
        if label not in image_dict.keys():
            image_dict[label] = []
        image_dict[label].append(idx)  # append unconditionally so the first index of each label is kept
    # Build training pairs for digit 0
    obs_idx0 = image_dict[0]  # all indices of digit 0
np.random.shuffle(obs_idx0)
train_x0, train_y0 = [], []
for idx in obs_idx0:
for i in range(storage0):
train_x0.append(idx)
trans_to_idx = np.random.choice(image_dict[1])
train_y0.append(trans_to_idx)
print("training data x0:", len(train_x0))
print("training data y0:", len(train_y0))
    # Build training pairs for digit 1
    obs_idx1 = image_dict[1]  # all indices of digit 1
np.random.shuffle(obs_idx1)
train_x1, train_y1 = [], []
for idx in obs_idx1:
for i in range(storage1):
train_x1.append(idx)
trans_to_label = np.random.randint(low=2, high=10)
trans_to_idx = np.random.choice(image_dict[trans_to_label])
train_y1.append(trans_to_idx)
print("training data x1:", len(train_x1))
print("training data y1:", len(train_y1))
train_x0_img = train_images[train_x0]
train_y0_img = train_images[train_y0]
print("\ntraining data x0:", train_x0_img.shape)
print("training data y0:", train_y0_img.shape)
train_x1_img = train_images[train_x1]
train_y1_img = train_images[train_y1]
print("\ntraining data x1:", train_x1_img.shape)
print("training data y1:", train_y1_img.shape)
train_x_img = np.vstack([train_x0_img, train_x1_img])
train_y_img = np.vstack([train_y0_img, train_y1_img])
print("\ntraining data x:", train_x_img.shape)
print("training data y:", train_y_img.shape)
return train_x_img, train_y_img
def mnist_data(build_train=True):
# data
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_images = np.expand_dims((train_images / 255.).astype(np.float32), axis=-1)
test_images = np.expand_dims((test_images / 255.).astype(np.float32), axis=-1)
# Binarization
train_images[train_images >= .5] = 1.
train_images[train_images < .5] = 0.
test_images[test_images >= .5] = 1.
test_images[test_images < .5] = 0.
# train
if build_train:
print("Generating training data:")
train_x, train_y = build_dataset(train_images, train_labels, storage0=5, storage1=50)
np.save('data/train_x.npy', train_x)
np.save('data/train_y.npy', train_y)
else:
train_dataset = None
print("Generating testing data:")
test_x, test_y = build_dataset(test_images, test_labels, storage0=5, storage1=10)
np.save('data/test_x.npy', test_x)
np.save('data/test_y.npy', test_y)
print("dataset done.")
def load_mnist_data():
train_x = np.load("data/train_x.npy")
train_y = np.load("data/train_y.npy")
train_dataset = tf.data.Dataset.from_tensor_slices((train_x, train_y)).shuffle(500000)
train_dataset = train_dataset.batch(512, drop_remainder=True)
test_x = tf.convert_to_tensor(np.load("data/test_x.npy"))
test_y = tf.convert_to_tensor(np.load("data/test_y.npy"))
return train_dataset, test_x, test_y
def train():
# model
optimizer = tf.train.AdamOptimizer(learning_rate=1e-5)
mnist_baseline = MnistBaseline()
# data
    # mnist_data(build_train=True)  # run this once first to build and save the dataset locally
train_dataset, test_x, test_y = load_mnist_data()
# start train
Epochs = 500
test_loss = []
for epoch in range(Epochs):
print("Epoch: ", epoch)
for i, (batch_x, batch_y) in enumerate(train_dataset):
with tf.GradientTape() as tape: # train
loss_ensemble = mnist_baseline.ensemble_loss(batch_x, batch_y)
loss = tf.reduce_mean(loss_ensemble)
if i % 10 == 0:
print(i, ", loss_ensemble:", [x.numpy() for x in loss_ensemble], ", loss:", loss.numpy(), flush=True)
gradients = tape.gradient(loss, mnist_baseline.trainable_variables)
# gradients, _ = tf.clip_by_global_norm(gradients, 1.)
optimizer.apply_gradients(zip(gradients, mnist_baseline.trainable_variables))
# test
t_loss = tf.reduce_mean(mnist_baseline.ensemble_loss(test_x, test_y))
test_loss.append(t_loss)
print("Test Loss:", t_loss)
# save
mnist_baseline.save_weights("baseline_model/model_"+str(epoch)+".h5")
np.save("baseline_model/test_loss.npy", np.array(test_loss))
if __name__ == '__main__':
train()
| 6,002 | 34.732143 | 121 | py |
probabilistic-ensemble | probabilistic-ensemble-main/baseline_generate.py | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_probability as tfp
from ensemble_model import BaselineModel
from noise_mnist_utils import normal_parse_params, rec_log_prob
tfd = tfp.distributions
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
tf.enable_eager_execution(config=config)
class MnistBaselineTest(tf.keras.Model):
def __init__(self, ensemble_num=3):
super().__init__()
self.ensemble_num = ensemble_num
self.baseline_model = [BaselineModel() for _ in range(ensemble_num)]
def ensemble_loss(self, obs, out_obs):
total_loss = []
for idx in range(self.ensemble_num):
single_loss = self.single_loss(tf.convert_to_tensor(obs), tf.convert_to_tensor(out_obs), idx)
total_loss.append(single_loss * np.random.random())
return total_loss
def single_loss(self, obs, out_obs, i=0):
""" 输出 variational lower bound, 训练目标是最大化该值. 输出维度 (batch,)
"""
rec_params = self.baseline_model[i](obs)
rec_loss = -1.0 * rec_log_prob(rec_params=rec_params, s_next=out_obs)
loss = tf.reduce_mean(rec_loss)
return loss
def generate_samples_params(self, obs):
""" k 代表采样的个数. 从 prior network 输出分布中采样, 随后输入到 generative network 中采样
"""
samples = []
for idx in range(self.ensemble_num):
sample_params = self.baseline_model[idx](obs) # (batch,28,28,1)
samples.append(sample_params[..., 0:1]) # take the mean
return samples
def build_test_dataset():
# data
(_, _), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
test_images = np.expand_dims((test_images / 255.).astype(np.float32), axis=-1)
test_images[test_images >= .5] = 1.
test_images[test_images < .5] = 0.
image_dict = {}
# dict of image and label
for idx in range(len(test_labels)):
        label = test_labels[idx]
        if label not in image_dict.keys():
            image_dict[label] = []
        image_dict[label].append(idx)  # append unconditionally so the first index of each label is kept
    # Randomly pick one example of each digit
    idx0_random = np.random.choice(image_dict[0])  # a random index of a digit-0 image
    idx1_random = np.random.choice(image_dict[1])  # a random index of a digit-1 image
    test_x0 = test_images[idx0_random]  # convert index to image
    test_x1 = test_images[idx1_random]  # convert index to image
return np.expand_dims(test_x0, axis=0), np.expand_dims(test_x1, axis=0) # shape=(1,28,28,1)
def generate_0(model):
    test_x, _ = build_test_dataset()  # get a digit-0 image
# sample
samples = model.generate_samples_params(test_x)
print([s.shape.as_list() for s in samples])
# plot
plt.figure(figsize=(10, 10))
plt.subplot(1, len(samples) + 1, 1)
plt.axis('off')
plt.imshow(test_x[0, :, :, 0], cmap='gray')
plt.title("input", fontsize=20)
idx = 1
for sample in samples:
sample = tf.nn.sigmoid(sample).numpy()
# sample[sample >= 0.0] = 1.
# sample[sample < 0.0] = 0.
assert sample.shape == (1, 28, 28, 1)
plt.subplot(1, len(samples)+1, idx+1)
plt.axis('off')
plt.imshow(sample[0, :, :, 0], cmap='gray')
plt.title("model "+str(idx), fontsize=20)
# plt.subplots_adjust(wspace=0., hspace=0.1)
idx += 1
plt.savefig("baseline_model/Mnist-Ensemble-res0.pdf")
# plt.show()
plt.close()
def generate_1(model):
    _, test_x = build_test_dataset()  # get a digit-1 image
# sample
samples = model.generate_samples_params(test_x)
print([s.shape.as_list() for s in samples])
# plot
plt.figure(figsize=(10, 10))
plt.subplot(1, len(samples) + 1, 1)
plt.axis('off')
plt.imshow(test_x[0, :, :, 0], cmap='gray')
plt.title("input", fontsize=20)
idx = 1
for sample in samples:
sample = tf.nn.sigmoid(sample).numpy()
# sample[sample >= 0.0] = 1.
# sample[sample < 0.0] = 0.
assert sample.shape == (1, 28, 28, 1)
plt.subplot(1, len(samples)+1, idx+1)
plt.axis('off')
plt.imshow(sample[0, :, :, 0], cmap='gray')
plt.title("model "+str(idx), fontsize=20)
# plt.subplots_adjust(wspace=0., hspace=0.1)
idx += 1
plt.savefig("baseline_model/Mnist-Ensemble-res1.pdf")
# plt.show()
plt.close()
if __name__ == '__main__':
# initialize model and load weights
test_x0, test_x1 = build_test_dataset()
ensemble_model = MnistBaselineTest()
ensemble_model.ensemble_loss(tf.convert_to_tensor(test_x0), tf.convert_to_tensor(test_x0))
print("load weights...")
ensemble_model.load_weights("baseline_model/model.h5")
print("load done")
# generate 0
print("Generate number 0")
generate_0(ensemble_model)
# generate 1
print("Generate number 1")
generate_1(ensemble_model)
| 4,805 | 31.255034 | 105 | py |
probabilistic-ensemble | probabilistic-ensemble-main/ensemble_model.py | import numpy as np
import tensorflow as tf
from noise_mnist_utils import normal_parse_params, rec_log_prob
layers = tf.keras.layers
tf.enable_eager_execution()
class ResBlock(tf.keras.Model):
"""
Usual full pre-activation ResNet bottleneck block.
"""
def __init__(self, outer_dim, inner_dim):
super(ResBlock, self).__init__()
data_format = 'channels_last'
self.net = tf.keras.Sequential([
layers.BatchNormalization(axis=-1),
layers.LeakyReLU(),
layers.Conv2D(inner_dim, (1, 1)),
layers.BatchNormalization(axis=-1),
layers.LeakyReLU(),
layers.Conv2D(inner_dim, (3, 3), padding='same'),
layers.BatchNormalization(axis=-1),
layers.LeakyReLU(),
layers.Conv2D(outer_dim, (1, 1))])
def call(self, x):
return x + self.net(x)
class MLPBlock(tf.keras.Model):
def __init__(self, inner_dim):
super(MLPBlock, self).__init__()
self.net = tf.keras.Sequential([
layers.BatchNormalization(),
layers.LeakyReLU(),
layers.Conv2D(inner_dim, (1, 1))])
def call(self, x):
return x + self.net(x)
class EncoderNetwork(tf.keras.Model):
def __init__(self):
super(EncoderNetwork, self).__init__()
self.net1 = tf.keras.Sequential([layers.Conv2D(8, 1),
ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8)])
self.net2 = tf.keras.Sequential([layers.AveragePooling2D(2, 2), layers.Conv2D(16, 1),
ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8)])
self.net3 = tf.keras.Sequential([layers.AveragePooling2D(2, 2), layers.Conv2D(32, 1),
ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16)])
self.pad3 = tf.keras.layers.ZeroPadding2D(padding=((1, 0), (1, 0)))
self.net4 = tf.keras.Sequential([layers.AveragePooling2D(2, 2), layers.Conv2D(64, 1),
ResBlock(64, 32), ResBlock(64, 32), ResBlock(64, 32), ResBlock(64, 32)])
self.net5 = tf.keras.Sequential([layers.AveragePooling2D(2, 2), layers.Conv2D(128, 1),
ResBlock(128, 64), ResBlock(128, 64), ResBlock(128, 64), ResBlock(128, 64)])
self.net6 = tf.keras.Sequential([layers.AveragePooling2D(2, 2), layers.Conv2D(128, 1),
MLPBlock(128), MLPBlock(128), MLPBlock(128), MLPBlock(128)])
def call(self, x):
        # With an input of shape (None, 28, 28, 2):
x = self.net1(x) # (b, 28, 28, 8)
x = self.net2(x) # (b, 14, 14, 16)
x = self.net3(x) # (b, 7, 7, 32)
x = self.pad3(x) # (b, 8, 8, 32)
x = self.net4(x) # (b, 4, 4, 64)
x = self.net5(x) # (b, 2, 2, 128)
x = self.net6(x) # (b, 1, 1, 128)
return x
class DecoderNetwork(tf.keras.Model):
def __init__(self):
super(DecoderNetwork, self).__init__()
self.net1 = tf.keras.Sequential([layers.Conv2D(128, 1),
MLPBlock(128), MLPBlock(128), MLPBlock(128), MLPBlock(128),
layers.Conv2D(128, 1), layers.UpSampling2D((2, 2))])
self.net2 = tf.keras.Sequential([layers.Conv2D(128, 1),
ResBlock(128, 64), ResBlock(128, 64), ResBlock(128, 64), ResBlock(128, 64),
layers.Conv2D(64, 1), layers.UpSampling2D((2, 2))])
self.net3 = tf.keras.Sequential([layers.Conv2D(64, 1),
ResBlock(64, 32), ResBlock(64, 32), ResBlock(64, 32), ResBlock(64, 32),
layers.Conv2D(32, 1), layers.UpSampling2D((2, 2))])
self.net4 = tf.keras.Sequential([layers.Conv2D(32, 1),
ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16),
layers.Conv2D(16, 1), layers.UpSampling2D((2, 2))])
self.net5 = tf.keras.Sequential([layers.Conv2D(16, 1),
ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8),
layers.Conv2D(8, 1), layers.UpSampling2D((2, 2))])
self.net6 = tf.keras.Sequential([layers.Conv2D(8, 1),
ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),
layers.Conv2D(4, 1)])
self.net7 = tf.keras.Sequential([layers.Conv2D(2, 1),
ResBlock(2, 2), ResBlock(2, 2), ResBlock(2, 2),
layers.Conv2D(2, 1)])
def call(self, x): # input=(b, 1, 1, 128)
x = self.net1(x) # (b, 2, 2, 128)
x = self.net2(x) # (b, 4, 4, 64)
x = self.net3(x) # (b, 8, 8, 32)
x = x[:, :-1, :-1, :] # (b, 7, 7, 32)
x = self.net4(x) # (b, 14, 14, 16)
x = self.net5(x) # (b, 28, 28, 8)
x = self.net6(x) # (b, 28, 28, 4)
x = self.net7(x) # (b, 28, 28, 2)
return x
class BaselineModel(tf.keras.Model):
def __init__(self):
super(BaselineModel, self).__init__()
self.encoder_network = EncoderNetwork()
self.decoder_network = DecoderNetwork()
def call(self, x):
en = self.encoder_network(x)
de = self.decoder_network(en)
return de
if __name__ == '__main__':
encoder_network = EncoderNetwork()
x1 = tf.convert_to_tensor(np.random.random((2, 28, 28, 2)), tf.float32)
y2 = encoder_network(x1)
print("output of encoder network:", y2.shape)
decoder_network = DecoderNetwork()
x2 = tf.convert_to_tensor(np.random.random((2, 1, 1, 128)), tf.float32)
y2 = decoder_network(x2)
print("output of decoder network:", y2.shape)
baseline_model = BaselineModel()
x3 = tf.convert_to_tensor(np.random.random((2, 28, 28, 1)), tf.float32)
y3 = baseline_model(x3)
print("output of baseline networks:", y3.shape)
print("Parameters:", np.sum([np.prod(v.shape.as_list()) for v in encoder_network.trainable_variables]))
print("Parameters:", np.sum([np.prod(v.shape.as_list()) for v in decoder_network.trainable_variables]))
print("Total Para:", np.sum([np.prod(v.shape.as_list()) for v in baseline_model.trainable_variables]))
rec_params = baseline_model(x3)
rec_loss = -1.0 * rec_log_prob(rec_params=rec_params, s_next=x3)
print("rec_loss:", rec_loss.shape)
loss = tf.reduce_mean(rec_loss)
print("loss:", loss)
| 6,326 | 38.055556 | 107 | py |
probabilistic-ensemble | probabilistic-ensemble-main/__init__.py | 0 | 0 | 0 | py |
|
wind_system | wind_system-main/Server/test.py | import numpy as np
x = np.linspace(1,1,201)
y = np.random.random(201)
header = "FAN DATA\n"
header += "PWM x15, TACHO x15"
with open('FAN_data.dat', 'wb') as f: #w-writing mode, b- binary mode
np.savetxt(f, [], header=header)
for i in range(201):
data = np.column_stack((x[i],y[i]))
np.savetxt(f, data)
f.flush()
#sleep(0.1)
| 368 | 23.6 | 69 | py |
wind_system | wind_system-main/Server/server.py | import random
import socket
import struct
import time
DEBUG = True
FANDATA_FMT = "hhhhhhhhhh"
fan_ip = [ '192.168.1.101','192.168.1.102', '192.168.1.103', '192.168.1.104', '192.168.1.105','192.168.1.106','192.168.1.107','192.168.1.108','192.168.1.109','192.168.1.110','192.168.1.111','192.168.1.112','192.168.1.113','192.168.1.114','192.168.1.115']
def setfans(sock, number, data):
# Target
target_port = 8888
target_ip = fan_ip[number]
# Data
if DEBUG:
print('Fans ', number, ' -> ' ,target_ip, ': ', data)
# Sending
try:
ip_data = struct.pack(FANDATA_FMT, data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9])
sock.sendto(ip_data, (target_ip, target_port))
except Exception as e:
if DEBUG:
print('Failed to send to',target_ip, e)
def getfans(sock):
try:
rcv_data, addr = sock.recvfrom(20)
status, rpm1, rpm2, rpm3, rpm4, rpm5, rpm6, rpm7, rpm8, rpm9 = struct.unpack(FANDATA_FMT, rcv_data)
if DEBUG:
print('I received', status, rpm1, rpm2, rpm3, rpm4, rpm5, rpm6, rpm7, rpm8, rpm9,"from", addr)
return True
except Exception as e:
return False
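# Packet-layout sketch (assumption: the fan-module firmware expects exactly this frame):
# FANDATA_FMT packs ten native-order 16-bit integers, i.e. a 20-byte datagram, which is
# why getfans() above reads with recvfrom(20).
def _demo_packet_layout():
    frame = struct.pack(FANDATA_FMT, 1, 400, 400, 400, 400, 400, 400, 400, 400, 400)
    print(struct.calcsize(FANDATA_FMT), 'bytes:', frame.hex())
    print(struct.unpack(FANDATA_FMT, frame))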
ROWS = 9
COLS = 15
# TODO: modulate the wall
fans = [ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
]
def fan_init( val ):
global fans
for x in range(0,COLS):
for y in range(0,ROWS):
fans[y][x] = val
fan_init(10)
def fan_print(fans):
for r in fans:
print(r)
fan_print(fans)
def loop(port):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setblocking(False)
sock.bind(('', port))
count = 0
timetick = time.time() + 1
while True:
if time.time() > timetick:
timetick += 2.0 # in seconds: e.g. 0.05 = 20Hz
val = random.randint(0,100)
data = [count, 400, 0, 400, 0, 1000, 0, 400 , 0, 400]
data = [count, 400, 0, 400, 0, 1000, 0, 400 , 0, 400]
data = [count, 400, 0, 0, 0, 0, 0, 0 , 0, 0]
data = [count, 400, 400, 400, 400, 400, 400, 400 , 400, 400]
#data = [count, 800, 800, 800, 800, 800, 800, 800 , 800, 800]
# Send to all modules
i = 0
for ip in fan_ip:
setfans(sock, i, data)
i+=1
count += 1
#time.sleep(1)
for ip in fan_ip:
gotdata = getfans(sock)
loop(8000)
| 3,122 | 27.390909 | 254 | py |
wind_system | wind_system-main/Server/server_gui.py | import random
import socket
import struct
import time
from timeit import default_timer as timer  # used by step_increase() below
import numpy as np
from tkinter import *
#sudo apt-get install python-tk
# global variable
fan_value = 0
pwmValues = " ";
rpmValues = " ";
DEBUG = True
FANDATA_FMT = "HHHHHHHHHH"
fan_ip = [ '192.168.1.101','192.168.1.102', '192.168.1.103', '192.168.1.104', '192.168.1.105','192.168.1.106','192.168.1.107','192.168.1.108','192.168.1.109','192.168.1.110','192.168.1.111','192.168.1.112','192.168.1.113','192.168.1.114','192.168.1.115']
def setfans(sock, number, data):
# Target
target_port = 8888
target_ip = fan_ip[number]
# Data
if DEBUG:
printPWM(number, target_ip, data)
# Sending
try:
ip_data = struct.pack(FANDATA_FMT, data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9])
sock.sendto(ip_data, (target_ip, target_port))
except Exception as e:
if DEBUG:
print('Failed to send to',target_ip, e)
def rpm(val):
if val <= 0:
return 0
return round(15000000 / val,1)
def getfans(sock):
try:
rcv_data, addr = sock.recvfrom(20)
status, rpm1, rpm2, rpm3, rpm4, rpm5, rpm6, rpm7, rpm8, rpm9 = struct.unpack(FANDATA_FMT, rcv_data)
if DEBUG:
printRPM(status, rpm(rpm1), rpm(rpm2), rpm(rpm3), rpm(rpm4), rpm(rpm5), rpm(rpm6), rpm(rpm7), rpm(rpm8), rpm(rpm9), addr)
return True
except Exception as e:
return False
#def printRpm(rpm1, rpm2, rpm3,rpm4,rpm5,rpm6,rpm7,rpm8,rpm9,addr):
# rpmValues = rpmvalues.join('I received', status, rpm(rpm1), rpm(rpm2), rpm(rpm3), rpm(rpm4), rpm(rpm5), rpm(rpm6), rpm(rpm7), rpm(rpm8), rpm(rpm9),"from", addr,"/n")
def printPWM(number, target_ip, data):
global pwmValues
pwmValues = pwmValues + 'PWM Module ' + str(number) + ' -> '
for ip in target_ip:
pwmValues += str(ip)
pwmValues = pwmValues + ': '
for d in data:
pwmValues += str(d) + " "
pwmValues = pwmValues + "\n"
print('Fans!', number, ' -> ' ,target_ip, ': ', data)
savePWM(pwmValues)
def printRPM(status, rpm1, rpm2, rpm3, rpm4, rpm5, rpm6, rpm7, rpm8, rpm9, addr):
global rpmValues
rpmValues += str(status) + ", " \
+ str((rpm1)) + ", "\
+ str((rpm2)) + ", "\
+ str((rpm3)) + ", "\
+ str((rpm4)) + ", "\
+ str((rpm5)) + ", "\
+ str((rpm6)) + ", "\
+ str((rpm7)) + ", "\
+ str((rpm7)) + ", "\
+ str((rpm8)) + ", "\
+ str((rpm9)) + ", "\
+ str(addr) + "\n"
saveRPM(rpmValues)
###################################
## ETHERNET
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setblocking(False)
sock.bind(('', 8000))
def FANWALL_Send():
global sock, fan_value
data = [fan_value, fan_value, fan_value, fan_value, fan_value, fan_value, fan_value, fan_value, fan_value, fan_value]
# Send to all modules
i = 0
for ip in fan_ip:
setfans(sock, i, data)
i+=1
def FANWALL_Send_Thread():
FANWALL_Send()
root.after(250, FANWALL_Send_Thread)
def FANWALL_Read_Thread():
gotdata = getfans(sock)
root.after(1, FANWALL_Read_Thread)
def throttle(var):
global fan_value
fan_value = int(var)
FANWALL_Send()
status = 0
def toggle():
global status
if status == 0:
throttle(1023)
status = 1
else:
throttle(400)
status = 0
root.after(2000, toggle)
def step():
global status
if status == 0:
throttle(1023)
status = 1
else:
throttle(0)
status = 0
root.after(20000, step)
def continous():
throttle(300)
root.after(20000,continous)
# wait for certain number of seconds before changing PWM
WAIT_GAP = 10
def step_increase():
start_time = timer()
current_time = timer()
while True:
elapsed_time = current_time - start_time
if elapsed_time <= WAIT_GAP:
throttle(200)
current_time = timer()
elif WAIT_GAP < elapsed_time <= 2 * WAIT_GAP:
throttle(250)
current_time = timer()
elif 2 * WAIT_GAP < elapsed_time <= 3 * WAIT_GAP:
throttle(300)
current_time = timer()
elif 3 * WAIT_GAP < elapsed_time <= 4 * WAIT_GAP:
throttle(350)
current_time = timer()
else:
throttle(350)
break
def savePWM(Values):
with open('PWM_data.dat', 'w') as f: #w-writing mode, b- binary mode
f.write(" FAN DATA \n PWM x15\n")
f.write(Values)
f.flush()
def saveRPM(Values):
with open('RPM_data.dat', 'w') as f: #w-writing mode, b- binary mode
f.write(" FAN DATA \n TACHO x15 \n")
f.write(Values)
f.flush()
root = Tk()
root.title("FAN WALL")
root.geometry("400x400")
vertical = Scale(root, from_=0, to=1023, command=throttle)
vertical.pack()
root.after(0, FANWALL_Send_Thread)
root.after(0, FANWALL_Read_Thread)
#root.after(0, toggle)
#root.after(0, step)
#root.after(0, continous)
root.after(0,step_increase)
root.mainloop()
# loop(8000)  # leftover from server.py; loop() is not defined in this file
| 5,336 | 24.782609 | 254 | py |
wind_system | wind_system-main/Tests/test.py | # import socket
#
# def send(data, port=50000, addr='239.192.1.100'):
# """send(data[, port[, addr]]) - multicasts a UDP datagram."""
# # Create the socket
# s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# # Make the socket multicast-aware, and set TTL.
# s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 20) # Change TTL (=20) to suit
# # Send the data
# s.sendto(data, (addr, port))
#
# def recv(port=50000, addr="239.192.1.100", buf_size=1024):
# """recv([port[, addr[,buf_size]]]) - waits for a datagram and returns the data."""
#
# # Create the socket
# s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#
# # Set some options to make it multicast-friendly
# s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# try:
# s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
# except AttributeError:
# pass # Some systems don't support SO_REUSEPORT
# s.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 20)
# s.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)
#
# # Bind to the port
# s.bind(('', port))
#
# # Set some more multicast options
# intf = socket.gethostbyname(socket.gethostname())
# s.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(intf))
# s.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(addr) + socket.inet_aton(intf))
#
# # Receive the data, then unregister multicast receive membership, then close the port
# data, sender_addr = s.recvfrom(buf_size)
# s.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP, socket.inet_aton(addr) + socket.inet_aton('0.0.0.0'))
# s.close()
# return data
#
# while True:
# print(recv(8888, "169.254.179.148", 1024))
import socket
UDP_IP = "169.254.179.148"
UDP_PORT = 8000
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.bind((UDP_IP, UDP_PORT))
while True:
print("aaa")
try:
# This is where the shit happens
data, addr = sock.recvfrom(1024) # buffer size is 1024 bytes
print(data)
except(ValueError):
continue
| 2,301 | 36.737705 | 118 | py |
wind_system | wind_system-main/Tests/send_and_listen.py | import socket
# print(socket.gethostname())
HOST = '' # '169.254.179.148' # '192.168.0.177' # '169.254.255.255'
PORT = 8888
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
sock.connect((HOST, PORT))
print("Binded!")
while True:
print("while...")
rcv_data, rcv_addr = sock.recvfrom(1024)
print("Received", repr(rcv_data))
| 379 | 20.111111 | 67 | py |
finer | finer-main/run_experiment.py | import click
import os
import logging
from configurations.configuration import Configuration
from finer import FINER
logging.getLogger('tensorflow').setLevel(logging.ERROR)
logging.getLogger('transformers').setLevel(logging.ERROR)
LOGGER = logging.getLogger(__name__)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ['TOKENIZERS_PARALLELISM'] = 'true'
cli = click.Group()
@cli.command()
@click.option('--method', default='transformer')
@click.option('--mode', default='train')
def run_experiment(method, mode):
"""
Main function that instantiates and runs a new experiment
:param method: Method to run ("bilstm", "transformer", "transformer_bilstm")
:param mode: Mode to run ("train", "evaluate")
"""
# Instantiate the Configuration class
Configuration.configure(method=method, mode=mode)
experiment = FINER()
def log_parameters(parameters):
LOGGER.info(f'\n---------------- {parameters.split("_")[0].capitalize()} Parameters ----------------')
for param_name, value in Configuration[parameters].items():
if isinstance(value, dict):
LOGGER.info(f'{param_name}:')
for p_name, p_value in value.items():
LOGGER.info(f'\t{p_name}: {p_value}')
else:
LOGGER.info(f'{param_name}: {value}')
if mode == 'train':
LOGGER.info('\n---------------- Train ----------------')
LOGGER.info(f"Log Name: {Configuration['task']['log_name']}")
for params in ['train_parameters', 'general_parameters', 'hyper_parameters', 'evaluation']:
log_parameters(parameters=params)
LOGGER.info('\n')
experiment.train()
elif mode == 'evaluate':
LOGGER.info('\n---------------- Evaluate Pretrained Model ----------------')
for params in ['train_parameters', 'general_parameters', 'evaluation']:
log_parameters(parameters=params)
LOGGER.info('\n')
experiment.evaluate_pretrained_model()
if __name__ == '__main__':
run_experiment()
| 2,062 | 32.819672 | 110 | py |
finer | finer-main/finer.py | import itertools
import logging
import os
import time
import re
import datasets
import numpy as np
import tensorflow as tf
import wandb
from copy import deepcopy
from tqdm import tqdm
from gensim.models import KeyedVectors
from seqeval.metrics import classification_report
from seqeval.scheme import IOB2
from tensorflow.keras.preprocessing.sequence import pad_sequences
from transformers import BertTokenizer, AutoTokenizer
from wandb.keras import WandbCallback
from configurations import Configuration
from data import DATA_DIR, VECTORS_DIR
from models import BiLSTM, Transformer, TransformerBiLSTM
from models.callbacks import ReturnBestEarlyStopping, F1MetricCallback
LOGGER = logging.getLogger(__name__)
class DataLoader(tf.keras.utils.Sequence):
def __init__(self, dataset, vectorize_fn, batch_size=8, max_length=128, shuffle=False):
self.dataset = dataset
self.vectorize_fn = vectorize_fn
self.batch_size = batch_size
if Configuration['general_parameters']['debug']:
self.indices = np.arange(100)
else:
self.indices = np.arange(len(dataset))
self.max_length = max_length
self.shuffle = shuffle
if self.shuffle:
np.random.shuffle(self.indices)
def __len__(self):
"""Denotes the numbers of batches per epoch"""
return int(np.ceil(len(self.indices) / self.batch_size))
def __getitem__(self, index):
"""Generate one batch of data"""
# Generate indexes of the batch
indices = self.indices[index * self.batch_size:(index + 1) * self.batch_size]
# Find list of batch's sequences + targets
samples = self.dataset[indices]
x_batch, y_batch = self.vectorize_fn(samples=samples, max_length=self.max_length)
return x_batch, y_batch
def on_epoch_end(self):
"""Updates indexes after each epoch"""
if self.shuffle:
np.random.shuffle(self.indices)
class FINER:
def __init__(self):
self.general_params = Configuration['general_parameters']
self.train_params = Configuration['train_parameters']
self.hyper_params = Configuration['hyper_parameters']
self.eval_params = Configuration['evaluation']
self.tag2idx, self.idx2tag = FINER.load_dataset_tags()
self.n_classes = len(self.tag2idx)
if Configuration['task']['mode'] == 'train':
display_name = Configuration['task']['log_name']
if Configuration['task']['model'] == 'transformer':
display_name = f"{display_name}_{self.train_params['model_name']}".replace('/', '-')
elif Configuration['task']['model'] == 'bilstm':
display_name = f"{display_name}_bilstm_{self.train_params['embeddings']}"
wandb.init(
entity=self.general_params['wandb_entity'],
project=self.general_params['wandb_project'],
id=Configuration['task']['log_name'],
name=display_name
)
shape_special_tokens_path = os.path.join(DATA_DIR, 'shape_special_tokens.txt')
with open(shape_special_tokens_path) as fin:
self.shape_special_tokens = [shape.strip() for shape in fin.readlines()]
self.shape_special_tokens_set = set(self.shape_special_tokens)
if Configuration['task']['model'] == 'bilstm':
if 'subword' in self.train_params['embeddings']:
self.train_params['token_type'] = 'subword'
else:
self.train_params['token_type'] = 'word'
word_vector_path = os.path.join(VECTORS_DIR, self.train_params['embeddings'])
if not os.path.exists(word_vector_path):
import wget
url = f"https://zenodo.org/record/6571000/files/{self.train_params['embeddings']}"
wget.download(url=url, out=word_vector_path)
if not os.path.exists(word_vector_path):
raise Exception(f"Unable to download {self.train_params['embeddings']} embeddings")
if word_vector_path.endswith('.vec') or word_vector_path.endswith('.txt'):
word2vector = KeyedVectors.load_word2vec_format(word_vector_path, binary=False)
else:
word2vector = KeyedVectors.load_word2vec_format(word_vector_path, binary=True)
if self.train_params['token_type'] == 'subword':
import tempfile
with tempfile.NamedTemporaryFile(mode='w') as tmp:
vocab_tokens = ['[PAD]', '[CLS]', '[SEP]', '[MASK]'] + list(word2vector.index_to_key)
tmp.write('\n'.join(vocab_tokens))
additional_special_tokens = []
if 'num' in self.train_params['embeddings']:
additional_special_tokens.append('[NUM]')
elif 'shape' in self.train_params['embeddings']:
additional_special_tokens.append('[NUM]')
additional_special_tokens.extend(self.shape_special_tokens)
# TODO: Check AutoTokenizer
self.tokenizer = BertTokenizer(
vocab_file=tmp.name,
use_fast=self.train_params['use_fast_tokenizer']
)
if additional_special_tokens:
self.tokenizer.additional_special_tokens = additional_special_tokens
if self.train_params['token_type'] == 'word':
self.word2index = {'[PAD]': 0, '[UNK]': 1}
self.word2index.update({word: i + 2 for i, word in enumerate(word2vector.index_to_key)})
self.word2vector_weights = np.concatenate(
[
np.mean(word2vector.vectors, axis=0).reshape((1, word2vector.vectors.shape[-1])),
word2vector.vectors
],
axis=0
)
self.word2vector_weights = np.concatenate(
[
np.zeros((1, self.word2vector_weights.shape[-1]), dtype=np.float32),
self.word2vector_weights
],
axis=0
)
if self.train_params['token_type'] == 'subword':
self.word2index = {'[PAD]': 0}
self.word2index.update({word: i + 1 for i, word in enumerate(word2vector.index_to_key)})
self.word2vector_weights = np.concatenate(
[
np.zeros((1, word2vector.vectors.shape[-1]), dtype=np.float32),
word2vector.vectors
],
axis=0
)
self.index2word = {v: k for k, v in self.word2index.items()}
elif Configuration['task']['model'] == 'transformer':
additional_special_tokens = []
if self.train_params['replace_numeric_values']:
additional_special_tokens.append('[NUM]')
if self.train_params['replace_numeric_values'] == 'SHAPE':
additional_special_tokens.extend(self.shape_special_tokens)
self.tokenizer = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path=self.train_params['model_name'],
additional_special_tokens=additional_special_tokens,
use_fast=self.train_params['use_fast_tokenizer']
)
@staticmethod
def load_dataset_tags():
dataset = datasets.load_dataset('nlpaueb/finer-139', split='train', streaming=True)
dataset_tags = dataset.features['ner_tags'].feature.names
tag2idx = {tag: int(i) for i, tag in enumerate(dataset_tags)}
idx2tag = {idx: tag for tag, idx in tag2idx.items()}
return tag2idx, idx2tag
def is_numeric_value(self, text):
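        # Heuristic: treat the token as numeric when it has at least as many digits as non-digit characters (e.g. '1,234.56': 6 digits vs 2 separators)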
digits, non_digits = 0, 0
for char in str(text):
if char.isdigit():
digits = digits + 1
else:
non_digits += 1
return (digits + 1) > non_digits
def vectorize(self, samples, max_length):
if Configuration['task']['model'] == 'bilstm' and self.train_params['token_type'] == 'word':
sample_tokens = [
[
token.lower()
for token in sample
]
for sample in samples['tokens']
]
if 'word.num' in self.train_params['embeddings']:
sample_tokens = [
[
'[NUM]' if re.fullmatch(r'(\d+[\d,.]*)|([,.]\d+)', token)
else token
for token in sample
]
for sample in sample_tokens
]
elif 'word.shape' in self.train_params['embeddings']:
for sample_idx, _ in enumerate(sample_tokens):
for token_idx, _ in enumerate(sample_tokens[sample_idx]):
if re.fullmatch(r'(\d+[\d,.]*)|([,.]\d+)', sample_tokens[sample_idx][token_idx]):
shape = '[' + re.sub(r'\d', 'X', sample_tokens[sample_idx][token_idx]) + ']'
if shape in self.shape_special_tokens_set:
sample_tokens[sample_idx][token_idx] = shape
else:
sample_tokens[sample_idx][token_idx] = '[NUM]'
word_indices = [
[
self.word2index[token]
if token in self.word2index
else self.word2index['[UNK]']
for token in sample
]
for sample in sample_tokens
]
word_indices = pad_sequences(
sequences=word_indices,
maxlen=max_length,
padding='post',
truncating='post'
)
x = word_indices
elif Configuration['task']['model'] == 'transformer' \
or (Configuration['task']['model'] == 'bilstm' and self.train_params['token_type'] == 'subword'):
sample_tokens = samples['tokens']
sample_labels = samples['ner_tags']
batch_token_ids, batch_tags, batch_subword_pooling_mask = [], [], []
for sample_idx in range(len(sample_tokens)):
sample_token_ids, sample_tags, subword_pooling_mask = [], [], []
sample_token_idx = 1 # idx 0 is reserved for [CLS]
for token_idx in range(len(sample_tokens[sample_idx])):
if (Configuration['task']['model'] == 'transformer' and self.train_params['model_name'] == 'nlpaueb/sec-bert-num') \
or (Configuration['task']['model'] == 'bilstm' and 'subword.num' in self.train_params['embeddings']):
if re.fullmatch(r'(\d+[\d,.]*)|([,.]\d+)', sample_tokens[sample_idx][token_idx]):
sample_tokens[sample_idx][token_idx] = '[NUM]'
if (Configuration['task']['model'] == 'transformer' and self.train_params['model_name'] == 'nlpaueb/sec-bert-shape') \
or (Configuration['task']['model'] == 'bilstm' and 'subword.shape' in self.train_params['embeddings']):
if re.fullmatch(r'(\d+[\d,.]*)|([,.]\d+)', sample_tokens[sample_idx][token_idx]):
shape = '[' + re.sub(r'\d', 'X', sample_tokens[sample_idx][token_idx]) + ']'
if shape in self.shape_special_tokens_set:
sample_tokens[sample_idx][token_idx] = shape
else:
sample_tokens[sample_idx][token_idx] = '[NUM]'
if self.train_params['replace_numeric_values']:
if self.is_numeric_value(sample_tokens[sample_idx][token_idx]):
if re.fullmatch(r'(\d+[\d,.]*)|([,.]\d+)', sample_tokens[sample_idx][token_idx]):
if self.train_params['replace_numeric_values'] == 'NUM':
sample_tokens[sample_idx][token_idx] = '[NUM]'
elif self.train_params['replace_numeric_values'] == 'SHAPE':
shape = '[' + re.sub(r'\d', 'X', sample_tokens[sample_idx][token_idx]) + ']'
if shape in self.shape_special_tokens_set:
sample_tokens[sample_idx][token_idx] = shape
else:
sample_tokens[sample_idx][token_idx] = '[NUM]'
token = sample_tokens[sample_idx][token_idx]
# Subword pooling (As in BERT or Acs et al.)
if 'subword_pooling' in self.train_params:
label_to_assign = self.idx2tag[sample_labels[sample_idx][token_idx]]
if self.train_params['subword_pooling'] == 'all': # First token is B-, rest are I-
if label_to_assign.startswith('B-'):
remaining_labels = 'I' + label_to_assign[1:]
else:
remaining_labels = label_to_assign
elif self.train_params['subword_pooling'] in ['first', 'last']:
remaining_labels = 'O'
else:
raise Exception(f'Choose a valid subword pooling ["all", "first" and "last"] in the train parameters.')
# Assign label to all (multiple) generated tokens, if any
token_ids = self.tokenizer(token, add_special_tokens=False).input_ids
sample_token_idx += len(token_ids)
sample_token_ids.extend(token_ids)
for i in range(len(token_ids)):
if self.train_params['subword_pooling'] in ['first', 'all']:
if i == 0:
sample_tags.append(label_to_assign)
subword_pooling_mask.append(1)
else:
if self.train_params['subword_pooling'] == 'first':
subword_pooling_mask.append(0)
sample_tags.append(remaining_labels)
elif self.train_params['subword_pooling'] == 'last':
if i == len(token_ids) - 1:
sample_tags.append(label_to_assign)
subword_pooling_mask.append(1)
else:
sample_tags.append(remaining_labels)
subword_pooling_mask.append(0)
if Configuration['task']['model'] == 'transformer': # if 'bert' in self.general_params['token_type']:
CLS_ID = self.tokenizer.vocab['[CLS]']
SEP_ID = self.tokenizer.vocab['[SEP]']
PAD_ID = self.tokenizer.vocab['[PAD]']
sample_token_ids = [CLS_ID] + sample_token_ids + [SEP_ID]
sample_tags = ['O'] + sample_tags + ['O']
subword_pooling_mask = [1] + subword_pooling_mask + [1]
# Append to batch_token_ids & batch_tags
batch_token_ids.append(sample_token_ids)
batch_tags.append(sample_tags)
batch_subword_pooling_mask.append(subword_pooling_mask)
if Configuration['task']['model'] == 'bilstm' and self.train_params['token_type'] == 'subword':
for sent_idx, _ in enumerate(batch_token_ids):
for tok_idx, _ in enumerate(batch_token_ids[sent_idx]):
token_subword = self.tokenizer.convert_ids_to_tokens(
batch_token_ids[sent_idx][tok_idx], skip_special_tokens=True)
batch_token_ids[sent_idx][tok_idx] = self.word2index[token_subword] \
if token_subword in self.word2index else self.word2index['[UNK]']
# Pad, truncate and verify
# Returns an np.array object of shape ( len(batch_size) x max_length ) that contains padded/truncated gold labels
batch_token_ids = pad_sequences(
sequences=batch_token_ids,
maxlen=max_length,
padding='post',
truncating='post'
)
# Replace last column with SEP special token if it's not PAD
if Configuration['task']['model'] == 'transformer':
batch_token_ids[np.where(batch_token_ids[:, -1] != PAD_ID)[0], -1] = SEP_ID
x = batch_token_ids
else:
x = None
if Configuration['task']['model'] == 'bilstm' and self.train_params['token_type'] == 'word':
y = pad_sequences(
sequences=samples['ner_tags'],
maxlen=max_length,
padding='post',
truncating='post'
)
elif Configuration['task']['model'] == 'transformer' \
or (Configuration['task']['model'] == 'bilstm' and self.train_params['token_type'] == 'subword'):
batch_tags = [[self.tag2idx[tag] for tag in sample_tags] for sample_tags in batch_tags]
# Pad/Truncate the rest tags/labels
y = pad_sequences(
sequences=batch_tags,
maxlen=max_length,
padding='post',
truncating='post'
)
if Configuration['task']['model'] == 'transformer':
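                # The last column was forced to [SEP] above for truncated sequences,
                # so reset its gold label to 'O' (id 0).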
y[np.where(x[:, -1] != PAD_ID)[0], -1] = 0
if self.train_params['subword_pooling'] in ['first', 'last']:
batch_subword_pooling_mask = pad_sequences(
sequences=batch_subword_pooling_mask,
maxlen=max_length,
padding='post',
truncating='post'
)
return [np.array(x), batch_subword_pooling_mask], y
else:
return np.array(x), y

    def build_model(self, train_params=None):
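        """
        Instantiates the configured architecture (BiLSTM, Transformer or
        Transformer + BiLSTM) with the given hyper-parameters and returns it.
        """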
if Configuration['task']['model'] == 'bilstm':
model = BiLSTM(
n_classes=self.n_classes,
n_layers=train_params['n_layers'],
n_units=train_params['n_units'],
dropout_rate=train_params['dropout_rate'],
crf=train_params['crf'],
word2vectors_weights=self.word2vector_weights,
)
elif Configuration['task']['model'] == 'transformer':
model = Transformer(
model_name=train_params['model_name'],
n_classes=self.n_classes,
dropout_rate=train_params['dropout_rate'],
crf=train_params['crf'],
tokenizer=self.tokenizer if self.train_params['replace_numeric_values'] else None,
subword_pooling=self.train_params['subword_pooling']
)
elif Configuration['task']['model'] == 'transformer_bilstm':
model = TransformerBiLSTM(
model_name=train_params['model_name'],
n_classes=self.n_classes,
dropout_rate=train_params['dropout_rate'],
crf=train_params['crf'],
n_layers=train_params['n_layers'],
n_units=train_params['n_units'],
tokenizer=self.tokenizer if self.train_params['replace_numeric_values'] else None,
)
else:
raise Exception(f"The model type that you entered isn't a valid one.")
return model

    def get_monitor(self):
monitor_metric = self.general_params['loss_monitor']
if monitor_metric == 'val_loss':
monitor_mode = 'min'
elif monitor_metric in ['val_micro_f1', 'val_macro_f1']:
monitor_mode = 'max'
else:
raise Exception(f'Unrecognized monitor: {self.general_params["loss_monitor"]}')
return monitor_metric, monitor_mode

    def train(self):
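        """
        Loads the FiNER-139 train/validation/test splits, builds and compiles the model,
        trains it with F1 monitoring, early stopping, LR reduction and W&B logging,
        saves the best weights and evaluates on the validation and test splits.
        """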
train_dataset = datasets.load_dataset(path='nlpaueb/finer-139', split='train')
train_generator = DataLoader(
dataset=train_dataset,
vectorize_fn=self.vectorize,
batch_size=self.general_params['batch_size'],
max_length=self.train_params['max_length'],
shuffle=True
)
validation_dataset = datasets.load_dataset(path='nlpaueb/finer-139', split='validation')
validation_generator = DataLoader(
dataset=validation_dataset,
vectorize_fn=self.vectorize,
batch_size=self.general_params['batch_size'],
max_length=self.train_params['max_length'],
shuffle=False
)
test_dataset = datasets.load_dataset(path='nlpaueb/finer-139', split='test')
test_generator = DataLoader(
dataset=test_dataset,
vectorize_fn=self.vectorize,
batch_size=self.general_params['batch_size'],
max_length=self.train_params['max_length'],
shuffle=False
)
train_params = deepcopy(self.train_params)
train_params.update(self.hyper_params)
# Build model
model = self.build_model(train_params=train_params)
LOGGER.info('Model Summary')
model.print_summary(print_fn=LOGGER.info)
optimizer = tf.keras.optimizers.Adam(learning_rate=train_params['learning_rate'], clipvalue=5.0)
if train_params['crf']:
model.compile(
optimizer=optimizer,
loss=model.crf_layer.loss,
run_eagerly=self.general_params['run_eagerly']
)
else:
model.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
run_eagerly=self.general_params['run_eagerly']
)
monitor, monitor_mode = self.get_monitor()
# Init callbacks
callbacks = []
f1_metric = F1MetricCallback(
train_params=train_params,
idx2tag=self.idx2tag,
validation_generator=validation_generator,
subword_pooling=self.train_params['subword_pooling'],
calculate_train_metric=False
)
callbacks.append(f1_metric)
callbacks.append(
ReturnBestEarlyStopping(
monitor=monitor,
mode=monitor_mode,
patience=self.general_params['early_stopping_patience'],
restore_best_weights=True,
verbose=1
)
)
callbacks.append(
tf.keras.callbacks.ReduceLROnPlateau(
monitor=monitor,
mode=monitor_mode,
factor=0.5,
cooldown=self.general_params['reduce_lr_cooldown'],
patience=self.general_params['reduce_lr_patience'],
verbose=1
)
)
if Configuration['task']['model'] == 'transformer':
wandb.config.update(
{
'model': 'transformer',
'model_name': self.train_params['model_name'],
}
)
elif Configuration['task']['model'] == 'bilstm':
wandb.config.update(
{
'model': 'bilstm',
                    'embeddings': self.train_params['embeddings'],
}
)
wandb.config.update(
{
'max_length': self.train_params['max_length'],
'replace_numeric_values': self.train_params['replace_numeric_values'],
'subword_pooling': self.train_params['subword_pooling'],
'epochs': self.general_params['epochs'],
'batch_size': self.general_params['batch_size'],
'loss_monitor': self.general_params['loss_monitor'],
'early_stopping_patience': self.general_params['early_stopping_patience'],
'reduce_lr_patience': self.general_params['reduce_lr_patience'],
'reduce_lr_cooldown': self.general_params['reduce_lr_cooldown']
}
)
wandb.config.update(self.hyper_params)
callbacks.append(
WandbCallback(
monitor=monitor,
mode=monitor_mode,
)
)
# Train model
start = time.time()
history = model.fit(
x=train_generator,
validation_data=validation_generator,
callbacks=callbacks,
epochs=self.general_params['epochs'],
workers=self.general_params['workers'],
max_queue_size=self.general_params['max_queue_size'],
use_multiprocessing=self.general_params['use_multiprocessing']
)
# Loss Report
self.loss_report(history.history)
# Save model
weights_save_path = os.path.join(Configuration['experiment_path'], 'model', 'weights.h5')
LOGGER.info(f'Saving model weights to {weights_save_path}')
model.save_weights(filepath=weights_save_path)
# Evaluate
self.evaluate(model, validation_generator, split_type='validation')
self.evaluate(model, test_generator, split_type='test')
training_time = time.time() - start
training_days = int(training_time / (24 * 60 * 60))
if training_days:
LOGGER.info(f'Training time: {training_days} days {time.strftime("%H:%M:%S", time.gmtime(training_time))} sec\n')
else:
LOGGER.info(f'Training time: {time.strftime("%H:%M:%S", time.gmtime(training_time))} sec\n')

    def evaluate(self, model, generator, split_type):
"""
:param model: the trained TF model
:param generator: the generator for the split type to evaluate on
:param split_type: validation or test
:return:
"""
LOGGER.info(f'\n{split_type.capitalize()} Evaluation\n{"-" * 30}\n')
LOGGER.info('Calculating predictions...')
y_true, y_pred = [], []
for x_batch, y_batch in tqdm(generator, ncols=100):
if self.train_params['subword_pooling'] in ['first', 'last']:
pooling_mask = x_batch[1]
x_batch = x_batch[0]
y_prob_temp = model.predict(x=[x_batch, pooling_mask])
else:
pooling_mask = x_batch
y_prob_temp = model.predict(x=x_batch)
# Get lengths and cut results for padded tokens
lengths = [len(np.where(x_i != 0)[0]) for x_i in x_batch]
if model.crf:
y_pred_temp = y_prob_temp.astype('int32')
else:
y_pred_temp = np.argmax(y_prob_temp, axis=-1)
for y_true_i, y_pred_i, l_i, p_i in zip(y_batch, y_pred_temp, lengths, pooling_mask):
if Configuration['task']['model'] == 'transformer':
if self.train_params['subword_pooling'] in ['first', 'last']:
y_true.append(np.take(y_true_i, np.where(p_i != 0)[0])[1:-1])
y_pred.append(np.take(y_pred_i, np.where(p_i != 0)[0])[1:-1])
else:
y_true.append(y_true_i[1:l_i - 1])
y_pred.append(y_pred_i[1:l_i - 1])
elif Configuration['task']['model'] == 'bilstm':
if self.train_params['subword_pooling'] in ['first', 'last']:
y_true.append(np.take(y_true_i, np.where(p_i != 0)[0]))
y_pred.append(np.take(y_pred_i, np.where(p_i != 0)[0]))
else:
y_true.append(y_true_i[:l_i])
y_pred.append(y_pred_i[:l_i])
# Indices to labels in one flattened list
seq_y_pred_str = []
seq_y_true_str = []
for y_pred_row, y_true_row in zip(y_pred, y_true): # For each sequence
seq_y_pred_str.append(
[self.idx2tag[idx] for idx in y_pred_row.tolist()]) # Append list with sequence tokens
seq_y_true_str.append(
[self.idx2tag[idx] for idx in y_true_row.tolist()]) # Append list with sequence tokens
flattened_seq_y_pred_str = list(itertools.chain.from_iterable(seq_y_pred_str))
flattened_seq_y_true_str = list(itertools.chain.from_iterable(seq_y_true_str))
assert len(flattened_seq_y_true_str) == len(flattened_seq_y_pred_str)
# TODO: Check mode (strict, not strict) and scheme
cr = classification_report(
y_true=[flattened_seq_y_true_str],
y_pred=[flattened_seq_y_pred_str],
zero_division=0,
mode=None,
digits=3,
scheme=IOB2
)
LOGGER.info(cr)

    def evaluate_pretrained_model(self):
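        """
        Rebuilds the model, loads the weights stored under
        eval_params['pretrained_model_path'] and evaluates on the splits listed in
        eval_params['splits'].
        """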
train_params = deepcopy(self.train_params)
train_params.update(self.hyper_params)
# Build model and load weights manually
model = self.build_model(train_params=train_params)
# Fake forward pass to get variables
LOGGER.info('Model Summary')
model.print_summary(print_fn=LOGGER.info)
# Load weights by checkpoint
model.load_weights(os.path.join(self.eval_params['pretrained_model_path'], 'weights.h5'))
for split in self.eval_params['splits']:
if split not in ['train', 'validation', 'test']:
raise Exception(f'Invalid split selected ({split}). Valid options are "train", "validation", "test"')
dataset = datasets.load_dataset(path='nlpaueb/finer-139', split=split)
generator = DataLoader(
dataset=dataset,
vectorize_fn=self.vectorize,
batch_size=self.general_params['batch_size'],
max_length=self.train_params['max_length'],
shuffle=False
)
self.evaluate(model=model, generator=generator, split_type=split)

    def loss_report(self, history):
"""
Prints the loss report of the trained model
        :param history: the History.history dictionary returned by model.fit() on completion of training
"""
best_epoch_by_loss = np.argmin(history['val_loss']) + 1
n_epochs = len(history['val_loss'])
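        # Per-epoch improvement trace: '-' marks an epoch that reached a new best
        # (lowest) validation loss, '+' marks an epoch that did not improve.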
val_loss_per_epoch = '- ' + ' '.join('-' if history['val_loss'][i] < np.min(history['val_loss'][:i])
else '+' for i in range(1, len(history['val_loss'])))
report = f'\nBest epoch by Val Loss: {best_epoch_by_loss}/{n_epochs}\n'
report += f'Val Loss per epoch: {val_loss_per_epoch}\n\n'
loss_dict = {
'loss': 'Loss',
'val_loss': 'Val Loss',
'val_micro_f1': 'Val Micro F1',
'val_macro_f1': 'Val Macro F1'
}
monitor_metric, monitor_mode = self.get_monitor()
if monitor_metric != 'val_loss':
argmin_max_fn = np.argmin if monitor_mode == 'min' else np.argmax
min_max_fn = np.min if monitor_mode == 'min' else np.max
best_epoch_by_monitor = argmin_max_fn(history[monitor_metric]) + 1
            val_monitor_per_epoch = ('- ' if monitor_mode == 'min' else '+ ') + ' '.join(
                '-' if history[monitor_metric][i] < min_max_fn(history[monitor_metric][:i])
                else '+' for i in range(1, len(history[monitor_metric])))
monitor_metric_str = " ".join([s.capitalize() for s in monitor_metric.replace('val_', '').split("_")])
val_monitor_metric_str = " ".join([s.capitalize() for s in monitor_metric.split("_")])
report += f'Best epoch by {val_monitor_metric_str}: {best_epoch_by_monitor}/{n_epochs}\n'
report += f'{val_monitor_metric_str} per epoch: {val_monitor_per_epoch}\n\n'
# loss_dict[monitor_metric.replace('val_', '')] = monitor_metric_str
# loss_dict[monitor_metric] = val_monitor_metric_str
report += f"Loss & {monitor_metric_str} Report\n{'-' * 100}\n"
else:
report += f"Loss Report\n{'-' * 100}\n"
report += f"Loss Report\n{'-' * 120}\n"
report += 'Epoch | '
report += ' | '.join([f"{loss_nick:<17}" for loss_name, loss_nick in loss_dict.items() if loss_name in history])
report += ' | Learning Rate' + '\n'
for n_epoch in range(len(history['loss'])):
report += f'Epoch #{n_epoch + 1:3.0f} | '
for loss_name in loss_dict.keys():
if loss_name in history:
report += f'{history[loss_name][n_epoch]:1.6f}' + ' ' * 10
report += '| '
report += f'{history["lr"][n_epoch]:.3e}' + '\n'
LOGGER.info(report)
| 33,261 | 42.881266 | 138 | py |
finer | finer-main/models/callbacks.py | import logging
import numpy as np
import itertools
from tqdm import tqdm
from seqeval.metrics.sequence_labeling import precision_recall_fscore_support
from tensorflow.keras.callbacks import Callback, EarlyStopping
from configurations import Configuration

LOGGER = logging.getLogger(__name__)


class ReturnBestEarlyStopping(EarlyStopping):
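    """
    EarlyStopping variant that also restores the best weights when training runs
    through all epochs without triggering early stopping (plain EarlyStopping only
    restores them when it actually stops early).
    """
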
def __init__(self, **kwargs):
super(ReturnBestEarlyStopping, self).__init__(**kwargs)

    def on_train_end(self, logs=None):
if self.stopped_epoch > 0:
if self.verbose > 0:
print(f'\nEpoch {self.stopped_epoch + 1}: early stopping')
elif self.restore_best_weights:
if self.verbose > 0:
print('Restoring model weights from the end of the best epoch.')
self.model.set_weights(self.best_weights)


class F1MetricCallback(Callback):
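    """
    Keras callback that, at the end of every epoch, decodes the validation (and
    optionally training) predictions, scores them with seqeval and writes micro/macro
    precision, recall and F1 into `logs`, so that e.g. 'val_micro_f1' can be monitored
    by the other callbacks.
    """
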
def __init__(
self,
train_params,
idx2tag,
train_generator=None,
validation_generator=None,
subword_pooling='all',
calculate_train_metric=False
):
super(F1MetricCallback, self).__init__()
if validation_generator is None:
raise Exception(f'F1MetricCallback: Please provide a validation generator')
if calculate_train_metric and train_generator is None:
raise Exception(f'F1MetricCallback: Please provide a train generator')
self.train_params = train_params
self.idx2tag = idx2tag
self.train_generator = train_generator
self.validation_generator = validation_generator
self.subword_pooling = subword_pooling
self.calculate_train_metric = calculate_train_metric

    def on_epoch_end(self, epoch, logs=None):
if logs is None:
logs = {}
if self.calculate_train_metric:
train_micro_precision, train_micro_recall, train_micro_f1, \
train_macro_precision, train_macro_recall, train_macro_f1, train_support = \
self.evaluate(generator=self.train_generator)
logs[f'micro_precision'] = train_micro_precision
logs[f'micro_recall'] = train_micro_recall
logs[f'micro_f1'] = train_micro_f1
logs[f'macro_precision'] = train_macro_precision
logs[f'macro_recall'] = train_macro_recall
logs[f'macro_f1'] = train_macro_f1
val_micro_precision, val_micro_recall, val_micro_f1, \
val_macro_precision, val_macro_recall, val_macro_f1, val_support = \
self.evaluate(generator=self.validation_generator)
logs[f'val_micro_precision'] = val_micro_precision
logs[f'val_micro_recall'] = val_micro_recall
logs[f'val_micro_f1'] = val_micro_f1
logs[f'val_macro_precision'] = val_macro_precision
logs[f'val_macro_recall'] = val_macro_recall
logs[f'val_macro_f1'] = val_macro_f1

    def evaluate(self, generator):
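        """
        Runs prediction over the generator, strips padded/special positions and returns
        (micro_p, micro_r, micro_f1, macro_p, macro_r, macro_f1, support) computed with
        seqeval on the flattened tag sequences.
        """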
y_true, y_pred = [], []
for x_batch, y_batch in tqdm(generator, ncols=100):
if self.subword_pooling in ['first', 'last']:
pooling_mask = x_batch[1]
x_batch = x_batch[0]
y_prob_temp = self.model.predict(x=[x_batch, pooling_mask])
else:
pooling_mask = x_batch
y_prob_temp = self.model.predict(x=x_batch)
# Get lengths and cut results for padded tokens
lengths = [len(np.where(x_i != 0)[0]) for x_i in x_batch]
if self.model.crf:
y_pred_temp = y_prob_temp.astype('int32')
else:
y_pred_temp = np.argmax(y_prob_temp, axis=-1)
for y_true_i, y_pred_i, l_i, p_i in zip(y_batch, y_pred_temp, lengths, pooling_mask):
if Configuration['task']['model'] == 'transformer':
if self.subword_pooling in ['first', 'last']:
y_true.append(np.take(y_true_i, np.where(p_i != 0)[0])[1:-1])
y_pred.append(np.take(y_pred_i, np.where(p_i != 0)[0])[1:-1])
else:
y_true.append(y_true_i[1:l_i - 1])
y_pred.append(y_pred_i[1:l_i - 1])
elif Configuration['task']['model'] == 'bilstm':
if self.subword_pooling in ['first', 'last']:
y_true.append(np.take(y_true_i, np.where(p_i != 0)[0]))
y_pred.append(np.take(y_pred_i, np.where(p_i != 0)[0]))
else:
y_true.append(y_true_i[:l_i])
y_pred.append(y_pred_i[:l_i])
# Indices to labels list of lists
seq_y_pred_str = []
seq_y_true_str = []
for y_pred_row, y_true_row in zip(y_pred, y_true):
seq_y_pred_str.append([self.idx2tag[idx] for idx in y_pred_row.tolist()])
seq_y_true_str.append([self.idx2tag[idx] for idx in y_true_row.tolist()])
flattened_seq_y_pred_str = list(itertools.chain.from_iterable(seq_y_pred_str))
flattened_seq_y_true_str = list(itertools.chain.from_iterable(seq_y_true_str))
assert len(flattened_seq_y_true_str) == len(flattened_seq_y_pred_str)
precision_micro, recall_micro, f1_micro, support = precision_recall_fscore_support(
y_true=[flattened_seq_y_true_str],
y_pred=[flattened_seq_y_pred_str],
average='micro',
warn_for=('f-score',),
beta=1,
zero_division=0
)
precision_macro, recall_macro, f1_macro, support = precision_recall_fscore_support(
y_true=[flattened_seq_y_true_str],
y_pred=[flattened_seq_y_pred_str],
average='macro',
warn_for=('f-score',),
beta=1,
zero_division=0
)
return precision_micro, recall_micro, f1_micro, precision_macro, recall_macro, f1_macro, support
| 5,967 | 39.053691 | 104 | py |
finer | finer-main/models/transformer_bilstm.py | import tensorflow as tf
import numpy as np
from transformers import AutoTokenizer, TFAutoModel
from tf2crf import CRF


class TransformerBiLSTM(tf.keras.Model):
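    """
    Token classification model: a pre-trained Transformer encoder followed by a stack
    of BiLSTM layers and a Dense softmax head, or a Dense + CRF head when `crf=True`.
    Supports 'all'/'first'/'last' subword pooling via an extra pooling-mask input.
    """
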
def __init__(
self,
model_name,
n_classes,
dropout_rate=0.1,
crf=False,
n_layers=1,
n_units=128,
tokenizer=None,
subword_pooling='all'
):
super().__init__()
self.n_classes = n_classes
self.dropout_rate = dropout_rate
self.crf = crf
self.n_layers = n_layers
self.n_units = n_units
self.subword_pooling = subword_pooling
self.encoder = TFAutoModel.from_pretrained(
pretrained_model_name_or_path=model_name
)
if tokenizer:
self.encoder.resize_token_embeddings(
new_num_tokens=len(tokenizer.vocab))
self.bilstm_layers = [
tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(
units=n_units,
activation='tanh',
recurrent_activation='sigmoid',
return_sequences=True,
name=f'BiLSTM_{i + 1}'
)
) for i in range(n_layers)
]
if self.crf:
self.classifier = tf.keras.layers.Dense(
units=n_classes,
activation=None
)
# Pass logits to a custom CRF Layer
self.crf_layer = CRF(output_dim=n_classes, mask=True)
else:
self.classifier = tf.keras.layers.Dense(
units=n_classes,
activation='softmax'
)

    def call(self, inputs, training=None, mask=None):
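        # `inputs` is either a batch of token ids, or [token ids, pooling mask] when
        # subword pooling is restricted to the first/last subword of each word.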
if self.subword_pooling in ['first', 'last']:
pooling_mask = inputs[1]
inputs = inputs[0]
        encodings = self.encoder(inputs)[0]
encodings = tf.keras.layers.SpatialDropout1D(
rate=self.dropout_rate
)(encodings, training=training)
for i, bilstm_layer in enumerate(self.bilstm_layers):
encodings = bilstm_layer(encodings)
encodings = tf.keras.layers.SpatialDropout1D(
rate=self.dropout_rate
)(encodings, training=training)
outputs = self.classifier(encodings)
if self.crf:
outputs = self.crf_layer(outputs, mask=tf.not_equal(inputs, 0))
if self.subword_pooling in ['first', 'last']:
outputs = tf.cast(tf.expand_dims(pooling_mask, axis=-1), dtype=tf.float32) * outputs
return outputs

    def print_summary(self, line_length=None, positions=None, print_fn=None):
# Fake forward pass to build graph
batch_size, sequence_length = 1, 32
inputs = np.ones((batch_size, sequence_length), dtype=np.int32)
if self.subword_pooling in ['first', 'last']:
pooling_mask = np.ones((batch_size, sequence_length), dtype=np.int32)
inputs = [inputs, pooling_mask]
self.predict(inputs)
self.summary(line_length=line_length, positions=positions, print_fn=print_fn)


if __name__ == '__main__':
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Init random seeds
np.random.seed(1)
tf.random.set_seed(1)
model_name = 'nlpaueb/sec-bert-base'
# Build test model
model = TransformerBiLSTM(
model_name=model_name,
n_classes=10,
dropout_rate=0.2,
crf=False,
n_layers=1,
n_units=128,
subword_pooling='all'
)
# inputs = pad_sequences(np.random.randint(0, 30000, (5, 32)), maxlen=64, padding='post', truncating='post')
inputs = [
'This is the first sentence',
'This is the second sentence',
'This is the third sentence',
'This is the fourth sentence',
'This is the last sentence, this is a longer sentence']
tokenizer = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path=model_name,
use_fast=True
)
inputs = tokenizer.batch_encode_plus(
batch_text_or_text_pairs=inputs,
add_special_tokens=False,
max_length=64,
padding='max_length',
return_tensors='tf'
).input_ids
outputs = pad_sequences(np.random.randint(0, 10, (5, 32)), maxlen=64, padding='post', truncating='post')
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-5, clipvalue=5.0)
if model.crf:
model.compile(
optimizer=optimizer,
loss=model.crf_layer.loss,
run_eagerly=True
)
else:
model.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
run_eagerly=True
)
    model.print_summary(line_length=150)
model.fit(x=inputs, y=outputs, batch_size=2)
model.predict(inputs, batch_size=1)
predictions = model.predict(inputs, batch_size=2)
print(predictions)
| 5,047 | 29.409639 | 112 | py |