import numpy as np
from sklearn import ensemble, tree, neural_network, svm
models = {
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html
"rf": {
"model": ensemble.RandomForestRegressor,
"param": {
"n_jobs": lambda: np.random.choice([-1]),
"n_estimators": lambda: np.random.randint(100, 300+1),
"max_features": lambda: np.random.randint(1, 100+1),
"max_depth": lambda: np.random.randint(1, 100+1),
"min_samples_leaf": lambda: np.random.randint(1, 3+1)
}
},
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html
"gb": {
"model": ensemble.GradientBoostingRegressor,
"param": {
"loss": lambda: np.random.choice(["huber"]),
"learning_rate": lambda: np.random.uniform(0.03, 0.3),
"n_estimators": lambda: np.random.randint(100, 300+1),
"max_depth": lambda: np.random.randint(1, 5+1),
"max_features": lambda: np.random.randint(1, 100+1),
"subsample": lambda: np.random.uniform(0.1, 1.0),
}
},
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostRegressor.html
"ab": {
"model": ensemble.AdaBoostRegressor,
"param": {
"base_estimator": lambda: np.random.choice([
tree.DecisionTreeRegressor(max_depth=1),
tree.DecisionTreeRegressor(max_depth=2),
tree.DecisionTreeRegressor(max_depth=3),
tree.DecisionTreeRegressor(max_depth=4),
tree.DecisionTreeRegressor(max_depth=5),
]),
"n_estimators": lambda: np.random.randint(20, 300+1),
"learning_rate": lambda: np.random.uniform(0.1, 2.0),
}
},
# https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVR.html
"svm": {
"model": svm.SVR,
"param": {
'max_iter': lambda: 250000,
"gamma": lambda: 10**np.random.uniform(-6, -1),
"C": lambda: 10**np.random.uniform(-2, 3),
"epsilon": lambda: np.random.choice([0.05, 0.1, 0.2])
}
},
}
class ModelGenerator():
def __init__(self, model_name, num_iterations=20, random_seed=42):
from itertools import product
from copy import copy
self.model = models[model_name]["model"]
self.params = models[model_name]["param"]
self.num_iterations = num_iterations
self.iteration = 0
self.random_seed = random_seed
def __len__(self):
return self.num_iterations
def __iter__(self):
return self
def __next__(self):
if self.iteration < self.num_iterations:
np.random.seed(self.random_seed + self.iteration)
self.iteration += 1
self.param = {
k:v() for (k,v) in self.params.items()
}
return self.model(**self.param)
raise StopIteration
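# Usage sketch (not part of the original snippet): a simple random hyper-parameter
# search over the "rf" family on synthetic data. All variable names below are
# illustrative; in practice candidates would be scored on a held-out validation set.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.rand(200, 100)   # 100 features, matching the max_features range above
    y_demo = rng.rand(200)
    best_score, best_model = -np.inf, None
    for candidate in ModelGenerator("rf", num_iterations=5):
        candidate.fit(X_demo, y_demo)
        score = candidate.score(X_demo, y_demo)  # in-sample R^2, for demonstration only
        if score > best_score:
            best_score, best_model = score, candidate
    print("best in-sample R^2:", best_score)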
|
"""
Functions for simplifying the creation of a local dask cluster.
License
-------
The code in this notebook is licensed under the Apache License,
Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0). Digital Earth
Africa data is licensed under the Creative Commons by Attribution 4.0
license (https://creativecommons.org/licenses/by/4.0/).
Contact
-------
If you need assistance, please post a question on the Open Data
Cube Slack channel (http://slack.opendatacube.org/) or on the GIS Stack
Exchange (https://gis.stackexchange.com/questions/ask?tags=open-data-cube)
using the `open-data-cube` tag (you can view previously asked questions
here: https://gis.stackexchange.com/questions/tagged/open-data-cube).
If you would like to report an issue with this script, you can file one on
Github https://github.com/digitalearthafrica/deafrica-sandbox-notebooks/issues
.. autosummary::
:nosignatures:
:toctree: gen
"""
import os
import dask
from datacube.utils.dask import start_local_dask
from datacube.utils.rio import configure_s3_access
from aiohttp import ClientConnectionError
from IPython.display import display  # used below to show the dask client summary
def create_local_dask_cluster(
spare_mem="3Gb", aws_unsigned=True, display_client=True, **kwargs
):
"""
Using the datacube utils function 'start_local_dask', generate
a local dask cluster.
Example use :
from deafrica_tools.dask import create_local_dask_cluster
create_local_dask_cluster(spare_mem='4Gb')
Parameters
----------
spare_mem : String, optional
The amount of memory, in Gb, to leave for the notebook to run.
This memory will not be used by the cluster. e.g '3Gb'
aws_unsigned : Bool, optional
This parameter determines if credentials for S3 access are required and
passes them on to processing threads, either local or on dask cluster.
Set to True if working with publicly available datasets, and False if
working with private data. i.e if loading Landsat C2 provisional data set
this to aws_unsigned=False
display_client : Bool, optional
An optional boolean indicating whether to display a summary of
the dask client, including a link to monitor progress of the
analysis. Set to False to hide this display.
**kwargs:
Additional keyword arguments that will be passed to start_local_dask().
E.g. n_workers can be set to be greater than 1.
"""
# configure dashboard link to go over proxy
dask.config.set(
{
"distributed.dashboard.link": os.environ.get(
"JUPYTERHUB_SERVICE_PREFIX", "/"
)
+ "proxy/{port}/status"
}
)
# start up a local cluster
client = start_local_dask(mem_safety_margin=spare_mem, **kwargs)
## Configure GDAL for s3 access
configure_s3_access(aws_unsigned=aws_unsigned, client=client)
# Show the dask cluster settings
if display_client:
display(client)
try:
from dask_gateway import Gateway
def create_dask_gateway_cluster(profile='r5_XL', workers=2):
"""
Create a cluster in our internal dask cluster.
Parameters
----------
profile : str
Possible values are: XL (2 cores, 15GB memory), 2XL (4 cores, 31GB memory), 4XL (8 cores, 62GB memory)
workers : int
Number of workers in the cluster.
"""
try:
gateway = Gateway()
options = gateway.cluster_options()
options['profile'] = profile
## This Configuration is used for dask-worker pod labels
options['jupyterhub_user'] = os.getenv('JUPYTERHUB_USER')
cluster = gateway.new_cluster(options)
cluster.scale(workers)
return cluster
except ClientConnectionError:
raise ConnectionError("access to dask gateway cluster unauthorized")
except ImportError:
def create_dask_gateway_cluster(*args, **kwargs):
        raise NotImplementedError("dask_gateway is not available in this environment")
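# Usage sketch (assumes a JupyterHub/Sandbox environment with the relevant
# packages installed; not part of the original module):
#
#     from deafrica_tools.dask import create_local_dask_cluster, create_dask_gateway_cluster
#
#     # Local cluster, leaving 4 GB of memory free for the notebook itself
#     create_local_dask_cluster(spare_mem='4Gb')
#
#     # Or a gateway cluster with 4 workers (requires dask_gateway access)
#     cluster = create_dask_gateway_cluster(profile='r5_XL', workers=4)
#     client = cluster.get_client()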
|
from dolfin import *
import numpy as np
from petsc4py import PETSc
class DirichletBoundary(SubDomain):
def inside(self, x, on_boundary):
return on_boundary
def build_nullspace(V, x):
"""Function to build null space for 3D elasticity"""
# Create list of vectors for null space
nullspace_basis = [x.copy() for i in range(6)]
# Build translational null space basis
V.sub(0).dofmap().set(nullspace_basis[0], 1.0);
V.sub(1).dofmap().set(nullspace_basis[1], 1.0);
V.sub(2).dofmap().set(nullspace_basis[2], 1.0);
# Build rotational null space basis
V.sub(0).set_x(nullspace_basis[3], -1.0, 1);
V.sub(1).set_x(nullspace_basis[3], 1.0, 0);
V.sub(0).set_x(nullspace_basis[4], 1.0, 2);
V.sub(2).set_x(nullspace_basis[4], -1.0, 0);
V.sub(2).set_x(nullspace_basis[5], 1.0, 1);
V.sub(1).set_x(nullspace_basis[5], -1.0, 2);
for x in nullspace_basis:
x.apply("insert")
# Create vector space basis and orthogonalize
basis = VectorSpaceBasis(nullspace_basis)
basis.orthonormalize()
return basis
# Class for equilibrium solution of polycrystal
class elasGrains:
"""
Solve incompatibility problem for post-processing HEDM results
using FEniCS
"""
tol = 1E-9
def __init__(self,mesh_file,rotations_file,strains_file):
global dof_min, dof_max
parameters["linear_algebra_backend"] = "PETSc"
self.mesh = Mesh(mesh_file + '.xml')
self.grains = MeshFunction('size_t',self.mesh,
mesh_file + '_physical_region.xml')
# Function spaces
self.V = VectorFunctionSpace(self.mesh, 'P', 1)
self.V0 = TensorFunctionSpace(self.mesh, 'DG', 0)
self.TFS = TensorFunctionSpace(self.mesh, 'DG', 0)
self.VFS = VectorFunctionSpace(self.mesh, 'DG', 0)
self.I_TFS_1 = TensorFunctionSpace(self.mesh, 'CG', 1) # Used in grad z problem
# mesh dimensions
self.dim = self.V.dim()
self.N = self.mesh.geometry().dim()
dof_coordinates = self.V.tabulate_dof_coordinates()
dof_coordinates.resize((self.dim, self.N))
self.dof_min = dof_coordinates.min(axis=0)
self.dof_max = dof_coordinates.max(axis=0)
print(self.dof_min)
print(self.dof_max)
# Set up grain orientation
self.rots = np.loadtxt(rotations_file)
self.orient = Function(self.V0)
# Vectorized version, used for processing averages/differences
self.cell_num = np.arange(len(self.grains.array()))
self.subdomain_num = self.grains.array()[:] - 1
for n in range(9):
cell_num_list = list((9*self.cell_num)+n)
self.orient.vector()[cell_num_list] = self.rots[self.subdomain_num,n]
# Strains from hexrd
self.exp_strn = np.loadtxt(strains_file)
self.sim_avg = np.zeros((self.grains.array().max(),9))
self.ref_strain = np.zeros( (len(self.grains.array()),9) )
self.dVol = np.fromiter( (c.volume() for c in cells(self.mesh)), float, count=self.mesh.num_cells() )
self.dVol = self.dVol / self.dVol.sum()
# For difference between lattice strain and experimental average
self.strain_diff_1 = Function(self.VFS)
self.strain_diff_2 = Function(self.VFS)
self.strain_diff_3 = Function(self.VFS)
# To reconstruct tensor from (three) solutions to incompatibility problem
self.x_id = Expression(("1.0", "0.0", "0.0"), degree=1)
self.y_id = Expression(("0.0", "1.0", "0.0"), degree=1)
self.z_id = Expression(("0.0", "0.0", "1.0"), degree=1)
# Helper functions for rotations and symmetric tensors (Don Boyce)
def to6vector(self,w3x3):
# return 6-vector form of 3x3 matrix
return as_vector([w3x3[0,0], w3x3[1,1], w3x3[2,2],
w3x3[1,2], w3x3[2,0], w3x3[0,1]])
def totensor(self,w6):
# reconstruct tensor from 6-vector
return as_matrix([[w6[0], w6[5], w6[4]],
[w6[5], w6[1], w6[3]],
[w6[4], w6[3], w6[2]]])
def tocrystal(self,w3x3):
return self.orient.T*w3x3*self.orient
def tosample(self,w3x3):
return self.orient*w3x3*self.orient.T
def sigc6(self,w):
# for w a 3d vector field
return dot(self.estf, self.to6vector(self.tocrystal(sym(grad(w)))))
def sigc3x3(self,w):
# for w a 3d vector field (displacement)
return self.totensor(self.sigc6(w))
def sigs3x3(self,w):
# stress in sample frame from displacement
return self.tosample(self.sigc3x3(w))
# Factor of 2, following Boyce; see elasticity3d.cpp (parameter VALFAC)
def Chcp(self,c11, c33, c44, c12, c13):
c = as_vector( [c11, c12, c13, c33, c44, (c11-c12)/2.0] )
return as_matrix( [[c[0], c[1], c[2], 0, 0, 0],
[c[1], c[0], c[2], 0, 0, 0],
[c[2], c[2], c[3], 0, 0, 0],
[0, 0, 0, 2*c[4], 0, 0],
[0, 0, 0, 0, 2*c[4], 0],
[0, 0, 0, 0, 0, 2*c[5]]] )
def Ccubic(self,c11, c12, c44):
c = as_vector( [c11, c12, c44] )
return as_matrix( [[c[0], c[1], c[1], 0, 0, 0],
[c[1], c[0], c[1], 0, 0, 0],
[c[1], c[1], c[0], 0, 0, 0],
[0, 0, 0, 2*c[2], 0, 0],
[0, 0, 0, 0, 2*c[2], 0],
[0, 0, 0, 0, 0, 2*c[2]]] )
# To derive stress from strain tensor
def sigc6_e(self,eps):
# For a strain tensor
return dot(self.estf, self.to6vector(self.tocrystal(eps)) )
def sigs_e(self,eps):
return self.tosample(self.totensor(self.sigc6_e(eps)))
def X_0(self,u,v,w):
return outer(self.x_id,u) + outer(self.y_id,v) + outer(self.z_id,w)
def sym_dev(self,U):
E = sym(U)
return E - ( (1./3)*tr(E)*Identity(3) )
def applyBC(self,bc_list=None):
self.bc_elas = bc_list
def elasticity_problem(self,reuse_PC=False, rtol=1e-8, atol=1e-12):
"""Setup the elasticity solver.
The petsc_amg preconditioner is used, with code taken from
the (undocumented) FEniCS example demo_elasticity.py
Keyword Arguments:
reuse_PC -- reuse the preconditioner (default False)
"""
self.u = TrialFunction(self.V)
self.d = self.u.geometric_dimension() # space dimension
self.v = TestFunction(self.V)
self.L_elas = dot(Constant((0,0,0)),self.v)*ds
# Create PETSC smoothed aggregation AMG preconditioner
self.pc_Eq = PETScPreconditioner("petsc_amg")
# Use Chebyshev smoothing for multigrid
PETScOptions.set("mg_levels_ksp_type", "chebyshev")
PETScOptions.set("mg_levels_pc_type", "jacobi")
# Improve estimate of eigenvalues for Chebyshev smoothing
PETScOptions.set("mg_levels_esteig_ksp_type", "cg")
PETScOptions.set("mg_levels_ksp_chebyshev_esteig_steps", 50)
# Create CG Krylov solver and turn convergence monitoring on
self.elasticity_solver = PETScKrylovSolver("cg", self.pc_Eq)
self.elasticity_solver.parameters["monitor_convergence"] = True
self.elasticity_solver.parameters["relative_tolerance"] = rtol
self.elasticity_solver.parameters["absolute_tolerance"] = atol
if reuse_PC:
self.elasticity_solver.set_reuse_preconditioner(True)
# For the solution
self.ue = Function(self.V)
# self.Ue = self.ue.vector()
# Create near null space basis (required for smoothed aggregation
# AMG). The solution vector is passed so that it can be copied to
# generate compatible vectors for the nullspace.
self.null_space = build_nullspace(self.V, self.ue.vector())
print(self.null_space)
def solve_elas(self,x,E_p=None):
"""Solve the elasticity equilibrium problem.
Keyword Arguments:
E_p -- plastic distortion to be included in RHS (default None)
Returns:
res -- the residual error between experimental and simulated grain averages
"""
if x['Crystal_Structure'] == "Cubic":
self.estf = self.Ccubic( x['Stiffness'][0], x['Stiffness'][1], x['Stiffness'][2] )
elif x['Crystal_Structure'] == "HCP":
self.estf = self.Chcp( x['Stiffness'][0], x['Stiffness'][1], x['Stiffness'][2], x['Stiffness'][3], x['Stiffness'][4] )
# Update orientation
for n in range(9):
cell_num_list = list((9*self.cell_num)+n)
self.orient.vector()[cell_num_list] = self.rots[self.subdomain_num,n]
self.a = inner(self.sigs3x3(self.u), sym(grad(self.v)))*dx
if E_p:
# Note use of sym(), assuming E_p to be the \chi field
L_elas_rhs = self.L_elas + inner(self.sigs_e(sym(E_p)), sym(grad(self.v)))*dx
else:
L_elas_rhs = self.L_elas
self.A_elas, self.b_elas = assemble_system(self.a, L_elas_rhs, self.bc_elas)
# Attach near nullspace to matrix
as_backend_type(self.A_elas).set_near_nullspace(self.null_space)
# Set matrix operator
self.elasticity_solver.set_operator(self.A_elas);
# Compute solution
self.elasticity_solver.solve(self.ue.vector(), self.b_elas);
if E_p:
self.Ue_sym = project( sym(grad(self.ue) - E_p), self.TFS, solver_type="cg", preconditioner_type="ilu")
else:
self.Ue_sym = project( sym(grad(self.ue)), self.TFS, solver_type="cg", preconditioner_type="ilu")
self.sim_strn = np.reshape(self.Ue_sym.vector().get_local(),(len(self.grains.array()),9))
for grain_no in range(self.grains.array().max()):
# Grain numbering is 1 index origin
cell_subset = self.grains.array()==(grain_no+1)
if np.any(cell_subset):
self.sim_avg[grain_no,:] = np.average(self.sim_strn[cell_subset,:],
axis=0,weights=self.dVol[cell_subset])
deps = self.exp_strn - self.sim_avg
resid = np.linalg.norm(deps.ravel())
print(resid) #,self.its)
return resid
def incompatibility_problem(self,reuse_PC=False, rtol=1e-8, atol=1e-12):
"""Setup the incompatibility solver.
Keyword Arguments:
reuse_PC -- reuse the preconditioner (default False)
"""
P1 = VectorFunctionSpace(self.mesh, 'CG', 1)
self.PN = FunctionSpace(self.mesh, "Nedelec 1st kind H(curl)", 1)
# Define test and trial functions
self.inc_v0 = TestFunction(self.PN)
u0 = TrialFunction(self.PN)
self.T1 = Function(self.PN) # Solution for the curl curl problem
self.T2 = Function(self.PN) # Solution for the curl curl problem
self.T3 = Function(self.PN) # Solution for the curl curl problem
# Boundary condition
zero = Expression(("0.0", "0.0", "0.0"), degree=1)
self.bc_X = DirichletBC(self.PN, zero, DirichletBoundary())
# LHS
self.a_X = inner(curl(u0), curl(self.inc_v0))*dx
# Create PETSc Krylov solver (from petsc4py)
self.ksp_X = PETSc.KSP()
self.ksp_X.create(PETSc.COMM_WORLD)
# Set the Krylov solver type and set tolerances
self.ksp_X.setType("cg")
# self.ksp_X.setTolerances(rtol=1.0e-6, atol=1.0e-10, divtol=1.0e10, max_it=50)
self.ksp_X.setTolerances(rtol=rtol, atol=atol, divtol=1.0e10, max_it=50)
# Get the preconditioner and set type (HYPRE AMS)
self.pc_X = self.ksp_X.getPC()
self.pc_X.setType("hypre")
self.pc_X.setHYPREType("ams")
# Build discrete gradient
PL = FunctionSpace(self.mesh, "Lagrange", 1)
G = DiscreteOperators.build_gradient(self.PN, PL)
# Attach discrete gradient to preconditioner
self.pc_X.setHYPREDiscreteGradient(as_backend_type(G).mat())
# Build constants basis for the Nedelec space
constants = [Function(self.PN) for i in range(3)]
for i, c in enumerate(constants):
direction = [1.0 if i == j else 0.0 for j in range(3)]
c.interpolate(Constant(direction))
# Inform preconditioner of constants in the Nedelec space
cvecs = [as_backend_type(constant.vector()).vec() for constant in constants]
self.pc_X.setHYPRESetEdgeConstantVectors(cvecs[0], cvecs[1], cvecs[2])
        # (no 'mass' term, so no beta Poisson matrix is needed)
self.pc_X.setHYPRESetBetaPoissonMatrix(None)
# preconditioner does not change
if reuse_PC:
self.pc_X.setReusePreconditioner(True)
# Set options prefix
self.ksp_X.setOptionsPrefix("inc_")
# Turn on monitoring of residual
self.opts = PETSc.Options()
self.opts.setValue("inc_ksp_monitor_true_residual", None)
# Tolerances are set above, could be modified using inc_ prefix
# self.opts.setValue("inc_ksp_rtol", 1e-10)
# self.opts.setValue("inc_ksp_atol", 1e-16)
self.pc_X.setOptionsPrefix("inc_")
self.pc_X.setFromOptions()
# Solve eddy currents equation (using potential T)
self.ksp_X.setFromOptions()
def incompatibility_solve_cg(self, useAMS=True):
"""Solve the incompatibility problem.
Keyword Arguments:
        useAMS -- use the HYPRE AMS preconditioner (default True);
                  the alternative is a Jacobi preconditioner.
        """
zero = Expression(("0.0", "0.0", "0.0"), degree=1)
bc = DirichletBC(self.PN, zero, DirichletBoundary())
T1 = Function(self.PN) # Solution for the curl curl problem
T2 = Function(self.PN) # Solution for the curl curl problem
T3 = Function(self.PN) # Solution for the curl curl problem
if useAMS:
# Set operator for the linear solver
L_X = inner(self.strain_diff_1, curl(self.inc_v0))*dx
A_X, b_X = assemble_system(self.a_X, L_X, bc)
self.ksp_X.setOperators(as_backend_type(A_X).mat())
self.ksp_X.solve(as_backend_type(b_X).vec(), as_backend_type(T1.vector()).vec())
# Show linear solver details
self.ksp_X.view()
# Solve 2nd system
L_X = inner(self.strain_diff_2, curl(self.inc_v0))*dx
A_X, b_X = assemble_system(self.a_X, L_X, bc)
self.ksp_X.setOperators(as_backend_type(A_X).mat())
self.ksp_X.solve(as_backend_type(b_X).vec(), as_backend_type(T2.vector()).vec())
            # Solve 3rd system
L_X = inner(self.strain_diff_3, curl(self.inc_v0))*dx
A_X, b_X= assemble_system(self.a_X, L_X, bc)
self.ksp_X.setOperators(as_backend_type(A_X).mat())
self.ksp_X.solve(as_backend_type(b_X).vec(), as_backend_type(T3.vector()).vec())
else:
### vanilla CG works with potential as RHS
L_X = inner(self.strain_diff_1, curl(self.inc_v0))*dx
solve(self.a_X == L_X, T1, bc,
solver_parameters={'linear_solver': 'cg', 'preconditioner': 'jacobi'})
L_X = inner(self.strain_diff_2, curl(self.inc_v0))*dx
solve(self.a_X == L_X, T2, bc,
solver_parameters={'linear_solver': 'cg', 'preconditioner': 'jacobi'})
L_X = inner(self.strain_diff_3, curl(self.inc_v0))*dx
solve(self.a_X == L_X, T3, bc,
solver_parameters={'linear_solver': 'cg', 'preconditioner': 'jacobi'})
return project( self.X_0(curl(T1),curl(T2),curl(T3)),
self.TFS, solver_type="cg", preconditioner_type="ilu")
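# Usage outline (illustrative only; file names, boundary conditions and stiffness
# values below are placeholders, not taken from the original source):
#
#     eg = elasGrains('mesh', 'rotations.txt', 'strains.txt')
#     eg.applyBC([DirichletBC(eg.V, Constant((0.0, 0.0, 0.0)), DirichletBoundary())])
#     eg.elasticity_problem(reuse_PC=True)
#     residual = eg.solve_elas({'Crystal_Structure': 'Cubic',
#                               'Stiffness': [c11, c12, c44]})
#     eg.incompatibility_problem()
#     chi = eg.incompatibility_solve_cg()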
|
output_formats = ('txt', 'bin', 'hex')
|
import os
import torch
import pickle
import pysmiles
import matplotlib
import numpy as np
import multiprocessing as mp
import matplotlib.pyplot as plt
from model import GNN
from openbabel import pybel
from featurizer import MolEFeaturizer
from dgl.dataloading import GraphDataLoader
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from networkx.algorithms.similarity import graph_edit_distance
from property_pred.pp_data_processing import PropertyPredDataset
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
plt.figure(figsize=(7, 4))
query_smiles = 'C3=C(C2=CC=C(N1CCC(O)CC1)N=N2)C(=CC(=C3)Cl)Cl'  # molecule no. 1196 in the BBBP dataset
query_no = 1196
query_graph = pysmiles.read_smiles(query_smiles, zero_order_bonds=False)
upper_bound = 50
timeout = 300
def get_sssr(args):
if os.path.exists('../data/' + args.dataset + '/sssr.pkl'):
print('loading GED data from ../data/' + args.dataset + '/sssr.pkl')
with open('../data/' + args.dataset + '/sssr.pkl', 'rb') as f:
res = pickle.load(f)
else:
smiles_list = []
print('processing ' + '../data/' + args.dataset + '/' + args.dataset + '.csv')
with open('../data/' + args.dataset + '/' + args.dataset + '.csv') as f:
for idx, line in enumerate(f.readlines()):
if idx == 0:
continue
items = line.strip().split(',')
if args.dataset == 'BBBP':
smiles = items[-1]
pysmiles.read_smiles(smiles)
else:
raise ValueError('unknown dataset')
smiles_list.append(smiles)
res = [len(pybel.readstring('smi', s).OBMol.GetSSSR()) for s in smiles_list]
print('saving SSSR data to ../data/' + args.dataset + '/sssr.pkl')
with open('../data/' + args.dataset + '/sssr.pkl', 'wb') as f:
pickle.dump(res, f)
return res
def get_ged(args):
if os.path.exists('../data/' + args.dataset + '/ged_wrt_' + str(query_no) + '.pkl'):
print('loading GED data from ../data/' + args.dataset + '/ged_wrt_' + str(query_no) + '.pkl')
with open('../data/' + args.dataset + '/ged_wrt_' + str(query_no) + '.pkl', 'rb') as f:
res = pickle.load(f)
else:
smiles_list = []
print('processing ' + '../data/' + args.dataset + '/' + args.dataset + '.csv')
with open('../data/' + args.dataset + '/' + args.dataset + '.csv') as f:
for idx, line in enumerate(f.readlines()):
if idx == 0:
continue
items = line.strip().split(',')
if args.dataset == 'BBBP':
smiles = items[-1]
pysmiles.read_smiles(smiles)
else:
raise ValueError('unknown dataset')
smiles_list.append(smiles)
smiles2ged = calculate_ged_with_mp(smiles_list)
res = [smiles2ged[s] for s in smiles_list]
print('saving GED data to ../data/' + args.dataset + '/ged_wrt_' + str(query_no) + '.pkl')
with open('../data/' + args.dataset + '/ged_wrt_' + str(query_no) + '.pkl', 'wb') as f:
pickle.dump(res, f)
return res
def calculate_ged_with_mp(smiles_list):
print('calculating GED using multiprocessing')
n_cores, pool, range_list = get_params_for_mp(len(smiles_list))
dict_list = pool.map(calculate_ged, zip([smiles_list[i[0]: i[1]] for i in range_list], range(n_cores)))
print('gathering results')
res = {}
for d in dict_list:
res.update(d)
return res
def get_params_for_mp(n_pairs):
n_cores = mp.cpu_count()
pool = mp.Pool(n_cores)
avg = n_pairs // n_cores
range_list = []
start = 0
for i in range(n_cores):
num = avg + 1 if i < n_pairs - avg * n_cores else avg
range_list.append([start, start + num])
start += num
return n_cores, pool, range_list
def calculate_ged(inputs):
def node_match(n1, n2):
return n1['element'] == n2['element'] and n1['charge'] == n2['charge']
def edge_match(e1, e2):
return e1['order'] == e2['order']
res = {}
smiles_list, pid = inputs
for i, smiles in enumerate(smiles_list):
graph = pysmiles.read_smiles(smiles, zero_order_bonds=False)
ged = graph_edit_distance(
graph, query_graph, node_match=node_match, edge_match=edge_match, upper_bound=upper_bound, timeout=timeout)
res[smiles] = ged
print('pid %d: %d / %d' % (pid, i, len(smiles_list)))
print('pid %d done' % pid)
return res
def draw(args):
if args.subtask == 'reaction':
model = MolEFeaturizer('../saved/' + args.pretrained_model)
emb, _ = model.transform(['CCO', 'CC=O', 'CC(=O)-O',
'CCCCCCCCO', 'CCCCCCCC=O', 'CCCCCCCC(=O)O',
'OCCO', 'O=CC=O', 'OC(=O)C(=O)O'
])
emb = PCA(n_components=2).fit_transform(emb)
color = ['red', 'darkorange', 'blue']
plt.plot(emb[0, 0], emb[0, 1], marker='o', color='red', markerfacecolor='none', markersize=8)
plt.plot(emb[1, 0], emb[1, 1], marker='^', color='red', markerfacecolor='none', markersize=8)
plt.plot(emb[2, 0], emb[2, 1], marker='s', color='red', markerfacecolor='none', markersize=8)
plt.plot(emb[3, 0], emb[3, 1], marker='o', color='darkorange', markerfacecolor='none', markersize=8)
plt.plot(emb[4, 0], emb[4, 1], marker='^', color='darkorange', markerfacecolor='none', markersize=8)
plt.plot(emb[5, 0], emb[5, 1], marker='s', color='darkorange', markerfacecolor='none', markersize=8)
plt.plot(emb[6, 0], emb[6, 1], marker='o', color='blue', markerfacecolor='none', markersize=8)
plt.plot(emb[7, 0], emb[7, 1], marker='^', color='blue', markerfacecolor='none', markersize=8)
plt.plot(emb[8, 0], emb[8, 1], marker='s', color='blue', markerfacecolor='none', markersize=8)
plt.show()
# plt.savefig('visualization/' + args.subtask + '.pdf', bbox_inches='tight')
else:
data = PropertyPredDataset(args)
path = '../saved/' + args.pretrained_model + '/'
print('loading hyperparameters of pretrained model from ' + path + 'hparams.pkl')
with open(path + 'hparams.pkl', 'rb') as f:
hparams = pickle.load(f)
print('loading pretrained model from ' + path + 'model.pt')
mole = GNN(hparams['gnn'], hparams['layer'], hparams['feature_len'], hparams['dim'])
if torch.cuda.is_available():
mole.load_state_dict(torch.load(path + 'model.pt'))
mole = mole.cuda(args.gpu)
else:
mole.load_state_dict(torch.load(path + 'model.pt', map_location=torch.device('cpu')))
dataloader = GraphDataLoader(data, batch_size=args.batch_size)
emb = []
properties = []
with torch.no_grad():
mole.eval()
for graphs_batch, labels_batch in dataloader:
embeddings_batch = mole(graphs_batch)
emb.append(embeddings_batch)
properties.append(labels_batch)
emb = torch.cat(emb, dim=0).cpu().numpy()
properties = torch.cat(properties, dim=0).cpu().numpy()
if args.subtask == 'size':
n_quantiles = 4
            sizes = np.array([g.num_nodes() for g in data.graphs])
thresholds = [np.quantile(sizes, i / n_quantiles) for i in range(1, n_quantiles)]
labels = np.zeros_like(sizes)
for i, q in enumerate(thresholds):
labels[sizes >= q] = i + 1
legend = [r'1 $\leq$ size $<$ 18', r'18 $\leq$ size $<$ 23', r'23 $\leq$ size $<$ 28', r'28 $\leq$ size']
colors = ['lightskyblue', 'gold', 'darkorange', 'maroon']
elif args.subtask == 'property':
labels = properties
thresholds = [0.5]
legend = ['non-permeable', 'permeable']
colors = ['maroon', 'gold']
elif args.subtask == 'ged':
ged = get_ged(args)
ged = np.array([d if d is not None else upper_bound + 10 for d in ged])
thresholds = [30, 50]
labels = np.zeros_like(ged)
for i, q in enumerate(thresholds):
labels[ged >= q] = i + 1
legend = [r'1 $\leq$ GED $<$ 30', r'30 $\leq$ GED $<$ 50', r'50 $\leq$ GED']
colors = ['darkorange', 'lightskyblue', 'maroon']
elif args.subtask == 'ring':
ring_cnt = np.array(get_sssr(args))
thresholds = [1, 2, 3]
labels = np.zeros_like(ring_cnt)
for i, q in enumerate(thresholds):
labels[ring_cnt >= q] = i + 1
legend = [r'# rings $=$ 0', r'# rings $=$ 1', r'# rings $=$ 2', r'# rings $\geq$ 3']
colors = ['lightskyblue', 'gold', 'darkorange', 'maroon']
else:
raise ValueError('unknown subtask')
print('calculating TSNE embeddings')
tsne = TSNE(random_state=0).fit_transform(emb)
for i in range(len(thresholds) + 1):
plt.scatter(tsne[labels == i, 0], tsne[labels == i, 1], s=3, c=colors[i])
plt.legend(legend, loc='upper right', fontsize=9, ncol=1)
plt.show()
# plt.savefig('visualization/' + args.subtask + '.pdf', bbox_inches='tight')
|
# Implementation of Shell Sort algorithm in Python
def shellSort(arr):
interval = 1
# Initializes interval
while (interval < (len(arr) // 3)):
interval = (interval * 3) + 1
while (interval > 0):
for i in range(interval, len(arr)):
# Select val to be inserted
val = arr[i]
j = i
# Shift element right
while ((j > interval - 1) and (arr[j - interval] >= val)):
arr[j] = arr[j - interval]
j -= interval
# Insert val at hole position
arr[j] = val
        # Reduce the interval for the next pass (integer division keeps it an int)
        interval = (interval - 1) // 3
l = [4, 1, 2, 5, 3]
print("Initial list: " + str(l))
shellSort(l)
print("Sorted list: " + str(l))
|
from pensieve import Pensieve
from chronometry.progress import ProgressBar
from silverware import Link
def get_special_data(wikipedia, name, echo=1):
"""
:type wikipedia: .Wikipedia.Wikipedia
:type name: str
:rtype: list[Pensieve]
"""
if name == 'country_pages':
page = wikipedia.get_page(url='https://en.wikipedia.org/wiki/List_of_national_capitals')
countries = list(page['tables'][0]['country'])
for one_or_more_links in countries:
if isinstance(one_or_more_links, list):
for link in one_or_more_links:
if isinstance(link, Link):
yield wikipedia.get_page(url=link.url)
elif isinstance(one_or_more_links, Link):
yield wikipedia.get_page(url=one_or_more_links.url)
if name == 'country_capital_pages':
page = wikipedia.get_page(url='https://en.wikipedia.org/wiki/List_of_national_capitals')
capitals = list(page['tables'][0]['city'])
for one_or_more_links in capitals:
if isinstance(one_or_more_links, list):
for link in one_or_more_links:
if isinstance(link, Link):
yield wikipedia.get_page(url=link.url)
elif isinstance(one_or_more_links, Link):
yield wikipedia.get_page(url=one_or_more_links.url)
if name == 'countries':
page = wikipedia.get_page(url='https://en.wikipedia.org/wiki/List_of_national_capitals')
table = page['tables'][0]
def row_to_pensieve(row):
pensieve = Pensieve(safe=False, warn_unsafe=False)
pensieve['capital'] = row['city'][0] if isinstance(row['city'], list) else row['city']
pensieve['country'] = row['country'][0] if isinstance(row['country'], list) else row['country']
pensieve.store(key='capital_name', precursors=['capital'], function=lambda x: x.text, evaluate=False)
pensieve.store(
key='capital_page', precursors=['capital'], function=lambda x: wikipedia.get_page(url=x.url), evaluate=False
)
pensieve.store(key='country_name', precursors=['country'], function=lambda x: x.text, evaluate=False)
pensieve.store(
key='country_page', precursors=['country'],
function=lambda x: wikipedia.get_page(url=x.url), evaluate=False
)
return pensieve
        # This function is a generator, so yield the rows instead of returning a list
        yield from ProgressBar.apply(function=row_to_pensieve, data=table, echo=echo)
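# Usage sketch (hypothetical; `wikipedia` is assumed to be a Wikipedia object
# matching the type hint in the docstring):
#
#     for pensieve in get_special_data(wikipedia, 'countries'):
#         print(pensieve['country_name'], '->', pensieve['capital_name'])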
|
# -*- coding: utf-8 -*-
import sys, os
import datetime, time, json, random
import argparse
import operator
import boto.ec2
import boto.iam
import boto3
import win32com.client as win
import getpass
from utils import get_local_refs
from logsetup import logger, log_event
TASK_FOLDER = "\\Drift"
PYTHON_PATH = r"c:\python27\python.exe"
ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..\\"))
def get_run_task_name(ref):
name = "Run ref=%s,%s" % (ref[0], ref[1])
return name
def get_run_tasks(scheduler):
objTaskFolder = scheduler.GetFolder(TASK_FOLDER)
colTasks = objTaskFolder.GetTasks(1)
ret = [(t.Definition.Actions[0].Arguments.split("--ref=")[-1].split(" ")[0], t.Name.split("=")[-1].split(",")[-1]) for t in colTasks if t.Name.startswith('Run ref=')]
return set(ret)
def remove_ref_task(scheduler, ref):
logger.warning("Removing task for ref '%s'" % str(ref))
try:
rootFolder = scheduler.GetFolder(TASK_FOLDER)
task_id = get_run_task_name(ref)
task = rootFolder.GetTask(task_id)
logger.info(" Stopping task '%s'" % str(task_id))
task.Stop(0)
task.Enabled = False
time.sleep(5.0)
logger.info(" Deleting task '%s'" % str(task_id))
rootFolder.DeleteTask(task_id, 0)
logger.info(" Killing running processes ")
from daemon import kill_processes_by_ref
kill_processes_by_ref(ref[0], ref[1])
logger.info("Done removing task for ref '%s'" % str(ref))
except Exception as e:
logger.error("Exception occurred removing task: %s" % e)
def add_ref_task(scheduler, ref):
logger.warning("Adding task for ref '%s'" % str(ref))
rootFolder = scheduler.GetFolder(TASK_FOLDER)
action_id = get_run_task_name(ref)
action_path = PYTHON_PATH
action_arguments = os.path.join(ROOT_PATH, "run.py run --ref=%s --tenant=%s" % (ref[0], ref[1]))
action_workdir = ROOT_PATH
author = getpass.getuser()
description = "Automatically created task from Drift Config"
task_id = action_id.replace('/', '_')
TASK_CREATE_OR_UPDATE = 6
TASK_ACTION_EXEC = 0
TASK_RUN_NO_FLAGS = 0
taskDef = scheduler.NewTask(0)
colTriggers = taskDef.Triggers
taskDef.Principal.UserId="NT Authority\\SYSTEM"
taskDef.Principal.RunLevel=1
trigger = colTriggers.Create(1)
trigger.StartBoundary = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
trigger.Repetition.Interval = "PT1M"
trigger.Repetition.StopAtDurationEnd = False
trigger.Enabled = True
colActions = taskDef.Actions
action = colActions.Create(TASK_ACTION_EXEC)
action.ID = action_id
action.Path = action_path
action.WorkingDirectory = action_workdir
action.Arguments = action_arguments
info = taskDef.RegistrationInfo
info.Author = 'System'
info.Description = description
settings = taskDef.Settings
settings.Enabled = True
settings.Hidden = False
settings.ExecutionTimeLimit = 'PT0S' # PT0S means run forever
result = rootFolder.RegisterTaskDefinition(task_id, taskDef, TASK_CREATE_OR_UPDATE, "", "", TASK_RUN_NO_FLAGS)
# start the task immediately
task = rootFolder.GetTask(task_id)
runningTask = task.Run("")
logger.info("Task for ref '%s' is now running" % str(ref))
def update_tasks():
scheduler = win.Dispatch("Schedule.Service")
scheduler.Connect()
actual_refs = get_run_tasks(scheduler)
wanted_refs = get_local_refs()
    print("Currently installed refs: %s" % ", ".join(["%s:%s" % (r[0], r[1]) for r in actual_refs]))
    print("I want to run the following refs: %s" % ", ".join(["%s:%s" % (r[0], r[1]) for r in wanted_refs]))
if actual_refs == wanted_refs:
logger.info('Wanted refs match installed refs. Nothing to do.')
sys.exit(0)
refs_to_remove = actual_refs - wanted_refs
refs_to_add = wanted_refs - actual_refs
for ref in refs_to_remove:
remove_ref_task(scheduler, ref)
log_event("remove_ref_task", "Removed task for ref '%s'" % ref[0], ref=ref[0], tenant_name=ref[1])
for ref in refs_to_add:
add_ref_task(scheduler, ref)
log_event("add_ref_task", "Added task for ref '%s'" % ref[0], ref=ref[0], tenant_name=ref[1])
|
# ---------------------------------------------------------
# IOU Tracker
# Copyright (c) 2017 TU Berlin, Communication Systems Group
# Licensed under The MIT License [see LICENSE for details]
# Written by Erik Bochinski
# ---------------------------------------------------------
from time import time
from util import load_mot, iou
def track_iou(detections, sigma_l, sigma_h, sigma_iou, t_min):
"""
Simple IOU based tracker.
See "High-Speed Tracking-by-Detection Without Using Image Information
by E. Bochinski, V. Eiselein, T. Sikora" for
more information.
Args:
detections (list): list of detections per frame, usually generated by
util.load_mot
sigma_l (float): low detection threshold.
sigma_h (float): high detection threshold.
sigma_iou (float): IOU threshold.
t_min (float): minimum track length in frames.
Returns:
list: list of tracks.
"""
tracks_active = []
tracks_finished = []
for frame_num, detections_frame in enumerate(detections, start=1):
# apply low threshold to detections
dets = [det for det in detections_frame if det['score'] >= sigma_l]
updated_tracks = []
for track in tracks_active:
if len(dets) > 0:
# get det with highest iou
best_match = max(dets, key=lambda x: iou(track['bboxes'][-1],
x['bbox']))
if iou(track['bboxes'][-1], best_match['bbox']) >= sigma_iou:
track['bboxes'].append(best_match['bbox'])
track['max_score'] = max(track['max_score'],
best_match['score'])
track['label'] = best_match['label']
updated_tracks.append(track)
                    # remove the best matching detection from the detection list
del dets[dets.index(best_match)]
# if track was not updated
if len(updated_tracks) == 0 or track is not updated_tracks[-1]:
# finish track when the conditions are met
if track['max_score'] >= sigma_h and len(
track['bboxes']) >= t_min:
tracks_finished.append(track)
# create new tracks
new_tracks = [{'bboxes': [det['bbox']], 'max_score': det['score'],
'start_frame': frame_num} for det in dets]
tracks_active = updated_tracks + new_tracks
# finish all remaining active tracks
tracks_finished += [track for track in tracks_active
if track['max_score'] >= sigma_h and len(
track['bboxes']) >= t_min]
return tracks_finished
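# Usage sketch (illustrative thresholds; 'det.txt' is a placeholder for a
# detection file in MOT format readable by util.load_mot):
#
#     detections = load_mot('det.txt')
#     tracks = track_iou(detections, sigma_l=0.3, sigma_h=0.5, sigma_iou=0.5, t_min=3)
#     print('%d tracks finished' % len(tracks))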
def track_iou_matlab_wrapper(detections, sigma_l, sigma_h, sigma_iou, t_min):
"""
Matlab wrapper of the iou tracker for the detrac evaluation toolkit.
Args:
detections (numpy.array): numpy array of detections, usually supplied by run_tracker.m
sigma_l (float): low detection threshold.
sigma_h (float): high detection threshold.
sigma_iou (float): IOU threshold.
t_min (float): minimum track length in frames.
Returns:
float: speed in frames per second.
list: list of tracks.
"""
detections = detections.reshape((7, -1)).transpose()
dets = load_mot(detections)
start = time()
tracks = track_iou(dets, sigma_l, sigma_h, sigma_iou, t_min)
end = time()
id_ = 1
out = []
for track in tracks:
for i, bbox in enumerate(track['bboxes']):
out += [float(bbox[0]), float(bbox[1]), float(bbox[2] - bbox[0]),
float(bbox[3] - bbox[1]),
float(track['start_frame'] + i), float(id_)]
id_ += 1
num_frames = len(dets)
speed = num_frames / (end - start)
return speed, out
|
from typing import List
class Solution:
def getRow(self, rowIndex: int) -> List[int]:
result = [0] * (rowIndex + 1)
result[0] = 1
for i in range(1, rowIndex + 1):
for j in range(i, 0, -1):
result[j] += result[j - 1]
        return result
|
import i3
workspaces = i3.get_workspaces()
for workspace in workspaces:
if workspace['focused']:
if workspace["name"] != "1":
i3.command('move', 'container to workspace number ' + str(int(workspace["name"])-1))
else:
i3.command('move', 'container to workspace number 10')
|
import Orange
data = Orange.data.Table("lenses")
print("Attributes:", ", ".join(x.name for x in data.domain.attributes))
print("Class:", data.domain.class_var.name)
print("Data instances", len(data))
target = "soft"
print("Data instances with %s prescriptions:" % target)
atts = data.domain.attributes
for d in data:
if d.get_class() == target:
print(" ".join(["%14s" % str(d[a]) for a in atts]))
|
import tempfile
from pathlib import PosixPath
import pytest
from django.test import TestCase
from mock import Mock
from model_mommy import mommy
from miseq_portal.miseq_viewer.models import *
pytestmark = pytest.mark.django_db
def test_validate_sample_id():
assert validate_sample_id('BMH-2017-000001') is True
with pytest.raises(ValidationError):
assert validate_sample_id('VALIDATION_ERROR')
with pytest.raises(ValidationError):
assert validate_sample_id('AAAA-2017-000001')
with pytest.raises(ValidationError):
assert validate_sample_id('BMH-TEST-000001')
with pytest.raises(ValidationError):
assert validate_sample_id('BMH-2017-AAAAAA')
def test_upload_run_file():
run = Mock(spec=Run)
run.id = 1
run.run_id = "MOCK_RUN_01"
run.run_type = 'BMH'
filename = 'test_file'
assert upload_run_file(run, filename) == f'uploads/runs/{run.run_id}/{filename}'
def test_upload_interop_file():
run = Mock(spec=Run)
run.id = 1
run.run_id = "MOCK_RUN_01"
run.run_type = "BMH"
runinterop = Mock(spec=RunInterOpData)
runinterop.id = 1
runinterop.run_id = run
filename = 'test_file'
assert upload_interop_file(runinterop, filename) == f'uploads/runs/{runinterop.run_id}/InterOp/{filename}'
def test_upload_interop_dir():
run = Mock(spec=Run)
run.id = 1
run.run_id = "MOCK_RUN_01"
run.run_type = "BMH"
assert upload_interop_dir(run) == f'uploads/runs/{run.run_id}/InterOp/'
def test_upload_reads():
# Run mock
run = Mock(spec=Run)
run.id = 1
run.run_id = "MOCK_RUN_01"
filename = "test_file"
# BMH sample mock
sample1 = Mock(spec=Sample)
sample1.id = 1
sample1.run_id = run
sample1.sample_type = 'BMH'
sample1.sample_id = "BMH-2017-000001"
# MER sample mock
sample2 = Mock(spec=Sample)
sample2.id = 2
sample2.sample_type = 'MER'
sample2.sample_id = "MER-2017-000001"
assert upload_reads(sample1, filename) == f'uploads/runs/{sample1.run_id}/{sample1.sample_id}/{filename}'
assert upload_reads(sample2, filename) == f'merged_samples/{sample2.sample_id}/{filename}'
def test_upload_assembly():
filename = "test_file"
# Run mock
run = Mock(spec=Run)
run.id = 1
run.run_id = "MOCK_RUN_01"
# BMH sample mock
sample1 = Mock(spec=Sample)
sample1.id = 1
sample1.run_id = run
sample1.sample_type = 'BMH'
sample1.sample_id = "BMH-2017-000001"
# MER sample mock
sample2 = Mock(spec=Sample)
sample2.id = 2
sample2.sample_type = 'MER'
sample2.sample_id = "MER-2017-000001"
# Mock SampleAssemblyData
assembly1 = Mock(spec=SampleAssemblyData)
assembly1.sample_id = sample1
assembly2 = Mock(spec=SampleAssemblyData)
assembly2.sample_id = sample2
assert upload_assembly(assembly1,
filename) == f'uploads/runs/{assembly1.sample_id.run_id}/{assembly1.sample_id}/assembly/{filename}'
assert upload_assembly(assembly2, filename) == f'merged_samples/{assembly2.sample_id}/assembly/{filename}'
class ProjectTest(TestCase):
@staticmethod
def test_project_creation():
proj = mommy.make(Project)
assert isinstance(proj, Project)
assert proj.__str__() == proj.project_id
class UserProjectRelationshipTest(TestCase):
@staticmethod
def test_user_project_relationship_creation():
rel = mommy.make(UserProjectRelationship)
assert isinstance(rel, UserProjectRelationship)
assert rel.__str__() == str(rel.project_id) + ':' + str(rel.user_id)
class RunTest(TestCase):
def setUp(self):
self.run = mommy.make(Run)
self.test_path = '/test/path'
self.run.interop_directory_path = self.test_path
def test_project_creation(self):
assert isinstance(self.run, Run)
assert self.run.__str__() == self.run.run_id
def test_get_interop_directory(self):
assert type(self.run.get_interop_directory()) is PosixPath
assert str(self.run.get_interop_directory()) == self.test_path
class RunInterOpDataTest(TestCase):
@staticmethod
def test_run_inter_op_data_creation():
data = mommy.make(RunInterOpData)
assert isinstance(data, RunInterOpData)
assert data.__str__() == str(data.run_id) + '_InterOp'
class MergedSampleComponentGroupTest(TestCase):
@staticmethod
def test_merged_sample_component_group_creation():
component_group = mommy.make(MergedSampleComponentGroup)
assert isinstance(component_group, MergedSampleComponentGroup)
assert component_group.__str__() == f"MergedSampleComponentGroup ({str(component_group.pk)})"
class SampleTest(TestCase):
def setUp(self):
self.bmh_sample = mommy.make(Sample)
self.bmh_sample.sample_id = 'BMH-2017-000001'
self.bmh_sample.sample_name = 'test_sample_name'
self.bmh_sample.sample_type = 'BMH'
self.mer_sample = mommy.make(Sample)
self.mer_sample.sample_name = 'test_sample_name'
self.mer_sample.sample_type = 'MER'
self.mer_sample.sample_id = self.mer_sample.generate_sample_id()
def test_sample_creation(self):
assert isinstance(self.bmh_sample, Sample)
assert self.bmh_sample.__str__() == self.bmh_sample.sample_id
def test_sample_year(self):
assert self.bmh_sample.sample_year == str(self.bmh_sample.created.year)
def test_generate_sample_id(self):
assert self.mer_sample.sample_id == \
f'{self.mer_sample.sample_type}-{self.mer_sample.sample_year}-{self.mer_sample.pk:06}'
class MergedSampleComponentTest(TestCase):
@staticmethod
def test_merged_sample_component_creation():
component = mommy.make(MergedSampleComponent)
assert isinstance(component, MergedSampleComponent)
assert component.__str__() == f"{component.component_id} ({component.group_id})"
class SampleLogDataTest(TestCase):
def setUp(self):
self.sample_log_data = mommy.make(SampleLogData)
self.sample_log_data.sample_yield = 1000000
def test_sample_log_data_creation(self):
assert isinstance(self.sample_log_data, SampleLogData)
assert self.sample_log_data.__str__() == str(self.sample_log_data.sample_id)
def test_sample_yield_mbp(self):
assert self.sample_log_data.sample_yield_mbp == float(self.sample_log_data.sample_yield / 1000000)
class SampleAssemblyDataTest(TestCase):
def setUp(self):
self.sample_assembly_data = mommy.make(SampleAssemblyData)
def test_sample_assembly_data_creation(self):
assert isinstance(self.sample_assembly_data, SampleAssemblyData)
assert self.sample_assembly_data.__str__() == str(self.sample_assembly_data.sample_id)
def test_get_assembly_path(self):
assembly = tempfile.NamedTemporaryFile(mode='w+', encoding='utf-8')
assembly.write(">TEST\nATCG")
assembly.flush()
self.sample_assembly_data.assembly = str(assembly.name)
assert type(self.sample_assembly_data.get_assembly_path()) == PosixPath
def test_assembly_exists(self):
assert self.sample_assembly_data.assembly_exists() is False
# Create assembly then check if fn returns True
assembly = tempfile.NamedTemporaryFile(mode='w+', encoding='utf-8')
assembly.write(">TEST\nATCG")
assembly.flush()
self.sample_assembly_data.assembly = str(assembly.name)
assert self.sample_assembly_data.assembly_exists() is True
|
from bunny_storm import RabbitMQConnectionData
def test_connection_data_creation() -> None:
# Arrange
expected_user = "user"
expected_pass = "pass"
expected_host = "8.8.8.8"
default_port = 5672
default_vhost = "/"
expected_uri = f"amqp://{expected_user}:{expected_pass}@{expected_host}:{default_port}/"
# Act
connection_data = RabbitMQConnectionData(username=expected_user, password=expected_pass, host=expected_host)
# Assert
assert connection_data.username == expected_user
assert connection_data.password == expected_pass
assert connection_data.host == expected_host
assert connection_data.port == default_port
assert connection_data.virtual_host == default_vhost
assert connection_data.uri() == expected_uri
|
#!/usr/bin/env python3
"""Convert person detections from the patched Darknet output to a pickle format.
The patched Darknet output is like:
Enter Image Path: /some/path1.jpg: Predicted in 0.035027 seconds.
cell phone: 12.924%
Box (LTWH): 1319,367,75,120
car: 86.035%
truck: 13.739%
Box (LTWH): 1799,345,79,47
Enter Image Path: /some/path2.jpg: Predicted in 0.035093 seconds.
cell phone: 14.358%
Box (LTWH): 1320,367,1333,382
So per image it outputs multiple boxes, and for each box multiple class labels, each with its own
confidence.
"""
import argparse
import logging
import os.path
import pickle
import re
import sys
import numpy as np
def main():
flags = initialize()
logging.info(f'Opening {flags.in_path}')
with open(flags.in_path, 'r') as f:
darknet_output_text = f.read()
detections_per_image = {}
if flags.relpath_components != 'auto':
flags.relpath_components = int(flags.relpath_components)
elif not flags.root_dir:
matches = re.finditer(IMAGE_REGEX, darknet_output_text, flags=re.MULTILINE | re.DOTALL)
paths = [m['path'] for m in matches]
flags.root_dir = os.path.commonprefix(paths)
for m_image in re.finditer(IMAGE_REGEX, darknet_output_text, flags=re.MULTILINE | re.DOTALL):
relative_image_path = get_relpath(m_image['path'], flags)
detections_per_image.setdefault(relative_image_path, [])
for m_object in re.finditer(OBJECT_REGEX, m_image['objects'], flags=re.MULTILINE):
if m_object is None:
continue
bbox = m_object_to_bbox(m_object)
if not is_shape_plausible(bbox):
continue
for m_class in re.finditer(CLASS_REGEX, m_object['classes'], flags=re.MULTILINE):
if m_class['classname'] == 'person':
confidence = float(m_class['conf']) / 100
bbox_with_confidence = [*bbox, confidence]
detections_per_image[relative_image_path].append(bbox_with_confidence)
if not detections_per_image[relative_image_path]:
logging.warning(f'No detections for {relative_image_path}, {m_image["path"]}')
for relative_image_path, detections in detections_per_image.items():
if not detections:
logging.warning(f'No detections for {relative_image_path}')
logging.info(f'Number of images: {len(detections_per_image)}')
n_images_without_detections = len([1 for x in detections_per_image.values() if not x])
logging.info(f'Number of images without detections: {n_images_without_detections}')
n_detections = sum(len(v) for v in detections_per_image.values())
logging.info(f'Total number of detections: {n_detections}')
logging.info(f'Saving file to {flags.out_path}')
with open(flags.out_path, 'wb') as f:
pickle.dump(detections_per_image, f, protocol=pickle.HIGHEST_PROTOCOL)
def get_relpath(p, flags):
if flags.root_dir:
return os.path.relpath(p, flags.root_dir)
return last_path_components(p, flags.relpath_components)
def m_object_to_bbox(m_object):
x1, y1, w, h = [int(x) for x in re.findall(r'\d+', m_object['coords'])]
return np.array([x1, y1, w, h])
def is_shape_plausible(bbox):
x, y, w, h = bbox
aspect_ratio = w / h
return w > 30 and 1 / 15 < aspect_ratio < 15
def initialize():
parser = argparse.ArgumentParser()
parser.add_argument('--in-path', type=str, required=True)
parser.add_argument('--out-path', type=str, default=None)
parser.add_argument('--loglevel', type=str, default='error')
parser.add_argument('--relpath-components', default='auto')
parser.add_argument('--root-dir')
flags = parser.parse_args()
if flags.out_path is None:
flags.out_path = flags.in_path.replace('.txt', '.pickle')
loglevel = dict(error=30, warning=20, info=10)[flags.loglevel]
simple_formatter = logging.Formatter('{asctime}-{levelname:^1.1} -- {message}', style='{')
print_handler = logging.StreamHandler(sys.stdout)
print_handler.setLevel(loglevel)
print_handler.setFormatter(simple_formatter)
logging.basicConfig(level=loglevel, handlers=[print_handler])
return flags
def split_path(path):
return os.path.normpath(path).split(os.path.sep)
def last_path_components(path, n_components):
components = split_path(path)
return os.path.sep.join(components[-n_components:])
IMAGE_REGEX = r"""(Enter Image Path: )*Enter Image Path: (?P<path>.+?): Predicted in .+? seconds\.
(?P<objects>.*?)(?=Enter)"""
OBJECT_REGEX = r"""(?P<classes>(?:.+?: .+?%
)+)Box \(LTWH\): (?P<coords>.+?)
"""
CLASS_REGEX = r"""(?P<classname>.+?): (?P<conf>.+)%"""
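# Small self-check (not part of the original script): parse a two-image sample in
# the same format as the module docstring and print the number of boxes per image.
# Call _demo_parse() manually to try it.
def _demo_parse():
    sample = (
        "Enter Image Path: /some/path1.jpg: Predicted in 0.035027 seconds.\n"
        "cell phone: 12.924%\n"
        "Box (LTWH): 1319,367,75,120\n"
        "Enter Image Path: /some/path2.jpg: Predicted in 0.035093 seconds.\n"
        "cell phone: 14.358%\n"
        "Box (LTWH): 1320,367,1333,382\n"
        "Enter"
    )
    for m_image in re.finditer(IMAGE_REGEX, sample, flags=re.MULTILINE | re.DOTALL):
        n_boxes = len(re.findall(OBJECT_REGEX, m_image['objects'], flags=re.MULTILINE))
        print(m_image['path'], n_boxes, 'box(es)')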
if __name__ == '__main__':
main()
|
import numpy as np
import histoptimizer
name = 'enumerate'
def partition_generator(num_items: int, num_buckets: int) -> list:
"""
Given a number of items `num_items` and a number of buckets `num_buckets`, enumerate lists of all the possible
combinations of divider locations that partition `num_items` into `num_buckets`.
The strategy is to start at the enumeration that has each divider in its left-most possible location, and then
iterate all possible locations of the last (right-most) divider before incrementing the next-to-last and again
iterating all possible locations of the last divider.
When there are no more valid locations for the next-to-last divider, then the previous divider is incremented and
the process repeated, and so on until the first divider and all subsequent dividers are in their largest
(right-most) possible locations.
"""
num_dividers = num_buckets - 1
last_divider = num_dividers - 1
partition = [x for x in range(1, num_dividers+1)] # Start with the first valid partition.
last_partition = [x for x in range(num_items - num_dividers, num_items)] # We know what the last partition is.
current_divider = last_divider
# Deal with single-divider/two-bucket case
if num_dividers == 1:
for last_location in range(1, num_items):
partition[0] = last_location
yield partition
return
while True:
if current_divider == last_divider:
for last_location in range(partition[current_divider-1] + 1, num_items):
partition[last_divider] = last_location
yield partition
if partition == last_partition:
return
# partition[last_divider] = 0
current_divider -= 1
else:
if partition[current_divider] == 0:
partition[current_divider] = partition[current_divider-1] + 1
current_divider += 1
elif partition[current_divider] < (num_items - (last_divider - current_divider)):
partition[current_divider] += 1
current_divider += 1
else:
for divider in range(current_divider, num_dividers):
partition[divider] = 0
current_divider -= 1
# if this is the last divider, then loop through all possible values yielding each
# then decrease the current divider location and set an increment flag
# if not last divider:
# check the current location of the current divider
# if it is zero, set to the minimum valid value (previous divider location + 1)
# elif it is less than the max location value, increment it and move to the next divider location
# elif it is at the max location value, then set it and all subsequent location values to 0
# and move to previous divider.
# End loop when all dividers are set at their last possible locations.
def partition(items, num_buckets, debug_info=None, mean=None):
min_variance = np.inf
best_partition = None
n = len(items)
if mean is None:
mean = sum(items) / num_buckets
prefix_sums = [0]*len(items)
prefix_sums[0] = items[0]
for i in range(1, len(items)):
prefix_sums[i] = prefix_sums[i-1] + items[i]
previous_dividers = [0] * (num_buckets - 1)
variances = [0.0] * num_buckets
# partitition_sums = [0.0] * num_buckets
for dividers in partition_generator(n, num_buckets):
divider_index = 0
variance = 0.0
# Most of the time, only one divider location has changed.
# Retain the previous prefix sums and variances to save time.
# If there are only two buckets, the single divider location has always changed.
while num_buckets > 2 and (dividers[divider_index] == previous_dividers[divider_index]):
divider_index += 1
for partition_index in range(0, num_buckets):
if divider_index - 1 >= partition_index:
pass # variances[partition_index] already contains the correct value from the previous iteration.
elif partition_index == 0:
variances[0] = (prefix_sums[dividers[0] - 1] - mean)**2
elif partition_index == (num_buckets - 1):
variances[partition_index] = (prefix_sums[-1] - prefix_sums[dividers[-1] - 1] - mean) ** 2
else:
variances[partition_index] = (
(prefix_sums[dividers[partition_index] - 1] - prefix_sums[dividers[partition_index - 1] - 1] - mean) ** 2)
variance += variances[partition_index]
if variance < min_variance:
min_variance = variance
best_partition = dividers[:]
previous_dividers[:] = dividers[:]
return np.array(best_partition), min_variance / num_buckets
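# Usage sketch (illustrative values, not from the original module): enumerate all
# divider placements for a short list and report the lowest-variance 3-bucket split.
if __name__ == '__main__':
    demo_items = [1, 2, 3, 4, 5, 6]
    best_dividers, variance = partition(demo_items, 3)
    print('best dividers:', best_dividers, 'variance:', variance)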
|
#encoding=utf8
import os
from wox import Wox,WoxAPI
from datetime import date
#Your class must inherit from Wox base class https://github.com/qianlifeng/Wox/blob/master/PythonHome/wox.py
#The wox class here did some works to simplify the communication between Wox and python plugin.
class Main(Wox):
obsidian_vault =os.path.join("P:\\", "obsidian")
journal_folder = "Journal"
timing_format="%Y-%m-%d"
journalfile = os.path.join(obsidian_vault, journal_folder, date.today().strftime(timing_format)+'.md')
message=""
# A function named query is necessary, we will automatically invoke this function when user query this plugin
# query is default function to receive realtime keystrokes from wox launcher
def query(self,query):
        # Build the list of result entries shown in the Wox launcher for this query
results = []
results.append({
"Title": "Obsidian Journal",
"SubTitle": "Append to Journal today: {}".format(query),
"IcoPath":"Images/obsidian_logo.png",
"ContextData": "ctxdata",
"JsonRPCAction": {
'method': 'take_action',
'parameters': ["{}".format(query)],
'dontHideAfterAction': True
}
})
return results
# context_menu is default function called for ContextData where `data = ctxData`
def context_menu(self, data):
results = []
results.append({
"Title": "Context menu entry",
"SubTitle": "Data: {}".format(data),
"IcoPath":"Images/obsidian_logo.png"
})
return results
    def take_action(self, SomeArgument):
        # Opening in append mode creates the journal file if it does not yet
        # exist, so a single branch handles both the new-file and append cases.
        try:
            with open(Main.journalfile, "a") as f:
                f.write(SomeArgument + "\n")
        except IOError:
            print("File not accessible")
        return None
#Following statement is necessary
if __name__ == "__main__":
    Main()
|
import xlrd
import os
import sys
# rootdir = 'D:/工作/code/electric/'
rootdir = sys.argv[1]
xlrd.Book.encoding = "gbk"
sumnum = 0
filenum = 0
file_list = os.listdir(rootdir)  # list all files and directories under the folder
for i in range(0, len(file_list)):
    path = os.path.join(rootdir, file_list[i])
    if os.path.isfile(path):
        print('Processing: ' + path)
        data = xlrd.open_workbook(path)
        table = data.sheet_by_index(0)
        # table = data.sheet_by_name(u'Sheet1')
        nrows = table.nrows
        data.release_resources()
        sumnum = sumnum + nrows
        filenum = filenum + 1
print('-------------------------------------------------------------------------')
print('%d files in total' % filenum)
print('%d rows of records in total' % sumnum)
|
#!/usr/bin/env python
import sys
import subprocess
import dbus
import string
import os
import fcntl
import glib
import gobject
import dbus.service
import dbus.mainloop.glib
DBUS_NAME = 'org.openbmc.HostIpmi'
OBJ_NAME = '/org/openbmc/HostIpmi/1'
def header(seq, netfn, lun, cmd):
    return (
        'seq: 0x%02x\nnetfn: 0x%02x\nlun: 0x%02x\ncmd: 0x%02x\n') % (
            seq, netfn, lun, cmd)
def print_request(seq, netfn, lun, cmd, data):
    msg = header(seq, netfn, lun, cmd)
    msg += 'data: [%s]' % ', '.join(['0x%02x' % x for x in data])
    print(msg)
def print_response(seq, netfn, lun, cmd, cc, data):
    msg = header(seq, netfn, lun, cmd)
    msg += 'cc: 0x%02x\ndata: [%s]' % (
        cc, ', '.join(['0x%02x' % x for x in data])
    )
    print(msg)
class IpmiDebug(dbus.service.Object):
def __init__(self, bus, name):
dbus.service.Object.__init__(self, bus, name)
@dbus.service.signal(DBUS_NAME, "yyyyay")
def ReceivedMessage(self, seq, netfn, lun, cmd, data):
        print("IPMI packet from host:")
print_request(seq, netfn, lun, cmd, data)
@dbus.service.method(DBUS_NAME, "yyyyyay", "x")
def sendMessage(self, seq, netfn, lun, cmd, ccode, data):
        print("IPMI packet sent to host:")
print_response(seq, netfn, lun, cmd, ccode, data)
return 0
@dbus.service.method(DBUS_NAME)
def setAttention(self):
        print("IPMI SMS_ATN set")
class ConsoleReader(object):
def __init__(self, ipmi_obj):
self.buffer = ''
self.seq = 0
self.ipmi_obj = ipmi_obj
flags = fcntl.fcntl(sys.stdin.fileno(), fcntl.F_GETFL)
flags |= os.O_NONBLOCK
fcntl.fcntl(sys.stdin.fileno(), fcntl.F_SETFL, flags)
glib.io_add_watch(sys.stdin, glib.IO_IN, self.io_callback)
def io_callback(self, fd, condition):
chunk = fd.read()
for char in chunk:
self.buffer += char
if char == '\n':
self.line(self.buffer)
self.buffer = ''
return True
def line(self, data):
s = data.split(' ')
if len(s) < 2:
print "Not enough bytes to form a valid IPMI packet"
return
try:
data = [int(c, 16) for c in s]
except ValueError:
return
self.seq += 1
self.ipmi_obj.ReceivedMessage(self.seq, data[0], 0, data[1], data[2:])
def main():
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
obj = IpmiDebug(bus, OBJ_NAME)
mainloop = gobject.MainLoop()
r = ConsoleReader(obj)
name = dbus.service.BusName(DBUS_NAME, bus)
print ("Enter IPMI packet as hex values. First three bytes will be used"
"as netfn and cmd.\nlun will be zero.")
mainloop.run()
if __name__ == '__main__':
sys.exit(main())
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
import os
import pickle
import numpy as np
import torch
import torch.nn.functional as F
from torch.optim import Adam
from torch.utils.data import DataLoader
from molgrad.net import MPNNPredictor
from molgrad.net_utils import GraphData, collate_pair
from molgrad.utils import DATA_PATH, MODELS_PATH, LOG_PATH
from molgrad.train import (
N_MESSPASS,
BATCH_SIZE,
INITIAL_LR,
N_EPOCHS,
DEVICE,
NUM_WORKERS,
TASK_GUIDE,
rmse,
train_loop,
)
if __name__ == "__main__":
for data in TASK_GUIDE.keys():
print(f'Now training production model for {data} dataset...')
if TASK_GUIDE[data] == "regression":
loss_fn = F.mse_loss
elif TASK_GUIDE[data] == "binary":
loss_fn = F.binary_cross_entropy_with_logits
else:
raise ValueError("Task not supported")
# public training
with open(os.path.join(DATA_PATH, f"{data}", f"data_{data}.pt"), "rb") as handle:
inchis, values = pickle.load(handle)
inchis = np.array(inchis)
values = np.array(values)[:, np.newaxis]
        mask = np.ones_like(values, dtype=bool)  # no missing labels
data_train = GraphData(inchis, values, mask, add_hs=False)
sample_item = data_train[0]
a_dim = sample_item[0].ndata["feat"].shape[1]
e_dim = sample_item[0].edata["feat"].shape[1]
g_dim = len(sample_item[1])
loader_train = DataLoader(
data_train,
batch_size=BATCH_SIZE,
shuffle=True,
collate_fn=collate_pair,
num_workers=NUM_WORKERS,
)
model = MPNNPredictor(
node_in_feats=a_dim,
edge_in_feats=e_dim,
global_feats=g_dim,
n_tasks=values.shape[1],
num_step_message_passing=N_MESSPASS,
output_f=None,
).to(DEVICE)
opt = Adam(model.parameters(), lr=INITIAL_LR)
train_losses = []
for epoch_no in range(N_EPOCHS):
print("Train epoch {}/{}...".format(epoch_no + 1, N_EPOCHS))
t_l = train_loop(loader_train, model, loss_fn, opt)
train_losses.extend(t_l)
os.makedirs(MODELS_PATH, exist_ok=True)
torch.save(model.state_dict(), os.path.join(MODELS_PATH, f"{data}_noHs.pt"))
os.makedirs(LOG_PATH, exist_ok=True)
np.save(os.path.join(LOG_PATH, f'{data}_noHs.pt'), arr=train_losses)
|
import pytest
from sqlalchemy import or_
from galaxy_crawler.models import v1 as models
from .base import ModelTestBase, create_session, create_ns, \
create_provider, create_provider_ns, create_platform, \
create_tag, create_repository
class TestRoleModel(ModelTestBase):
def setup_method(self):
super(TestRoleModel, self).setup_method()
sess = create_session(self.engine)
create_provider(sess)
ns = create_ns(sess)
provider_ns = create_provider_ns(sess, namespace_id=ns.namespace_id)
create_repository(sess, provider_ns_id=provider_ns.provider_namespace_id)
create_platform(sess)
for i, name in enumerate(["development", "system", "web"]):
create_tag(sess, i, name)
sess.commit()
@pytest.mark.parametrize(
"role_json", [
{
"id": 1,
"summary_fields": {
"dependencies": [],
"namespace": {
"id": 1,
"name": "ns",
"avatar_url": "https://example.com/avatar",
"location": "Example Location",
"company": "Example Company",
"email": None,
"html_url": "https://example.com/test",
"is_vendor": False
},
"platforms": [
{
"name": "Ubuntu",
"release": "bionic"
}
],
"provider_namespace": {
"id": 1,
"name": "test"
},
"repository": {
"id": 1,
"name": "test",
"original_name": "test",
"stargazers_count": 10,
"watchers_count": 10,
"forks_count": 10,
"open_issues_count": 10,
"travis_status_url": "https://travis-ci.org/example",
"travis_build_url": "https://travis-ci.org/example",
"format": "role",
"deprecated": False,
"community_score": 3.5,
"quality_score": 5.0,
"community_survey_count": 5
},
"tags": [
"development",
"system",
"web"
],
"versions": [
{
"id": 1,
"name": "1.0.0",
"release_date": "2018-01-23T00:00:00Z"
},
]
},
"created": "2014-01-23T00:00:00.000000Z",
"modified": "2019-01-23T01:23:45.000000Z",
"name": "test",
"role_type": "ANS",
"is_valid": True,
"min_ansible_version": "2.4",
"license": "license (BSD, MIT)",
"company": "Example",
"description": "Test",
"travis_status_url": "https://travis-ci.org/example",
"download_count": 100,
"imported": "2019-01-23T00:00:00.000000-04:00",
"active": True,
"github_user": "test",
"github_repo": "test-role",
"github_branch": "master",
"stargazers_count": 10,
"forks_count": 0,
"open_issues_count": 10,
"commit": "b380413513177006b9641fd7ff960ea7d1051942",
"commit_message": "Test",
"commit_url": "https://example.com/commit",
"issue_tracker_url": "https://example.com/issues"
},
]
)
def test_insert(self, role_json):
sess = create_session(self.engine)
role = models.Role.from_json(role_json, sess) # type: models.Role
sess.add(role)
sess.commit()
assert role.role_id == role_json.get("id")
assert role.namespace.name == \
role_json['summary_fields']['namespace']['name']
assert role.repository.name == \
role_json['summary_fields']['repository']['name']
assert {l.name for l in role.licenses} == \
{"BSD", "MIT"}
assert {p.name for p in role.platforms} == \
{"Ubuntu"}
assert {v.name for v in role.versions} == \
{"1.0.0"}
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Top-level module for sagemaker_pyspark
"""
from .wrapper import SageMakerJavaWrapper, Option
from .IAMRoleResource import IAMRole, IAMRoleFromConfig
from .SageMakerClients import SageMakerClients
from .S3Resources import S3DataPath, S3Resource, S3AutoCreatePath
from .NamePolicy import RandomNamePolicy, RandomNamePolicyFactory, \
CustomNamePolicy, CustomNamePolicyFactory, \
CustomNamePolicyWithTimeStampSuffix, \
CustomNamePolicyWithTimeStampSuffixFactory
from .SageMakerEstimator import EndpointCreationPolicy, SageMakerEstimator, SageMakerEstimatorBase
from .SageMakerModel import SageMakerModel
from .SageMakerResourceCleanup import SageMakerResourceCleanup, CreatedResources
def classpath_jars():
"""Returns a list with the paths to the required jar files.
    The sagemaker_pyspark library is mostly a wrapper around the Scala
    sagemaker-spark SDK and depends on a set of jar files to work correctly.
    This function retrieves the location of these jars in the local installation.
Returns:
List of absolute paths.
"""
import pkg_resources
pkg_dir = __name__
jars_dir = "/jars/"
bundled_jars = pkg_resources.resource_listdir(pkg_dir, jars_dir)
jars = [pkg_resources.resource_filename(pkg_dir, jars_dir + jar) for jar in bundled_jars]
return jars
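# Illustrative usage sketch (not part of the original module): a common pattern
# is to put the bundled jars on the Spark driver/executor classpath before
# creating a session. The Spark config keys below are standard Spark options,
# but treat this snippet as an assumption about intended usage, not as the
# library's documented API; the ":" separator assumes a POSIX classpath.
#
#     from pyspark.sql import SparkSession
#     import sagemaker_pyspark
#
#     jars = ":".join(sagemaker_pyspark.classpath_jars())
#     spark = (SparkSession.builder
#              .config("spark.driver.extraClassPath", jars)
#              .config("spark.executor.extraClassPath", jars)
#              .getOrCreate())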
__all__ = ['SageMakerJavaWrapper',
'IAMRole',
'IAMRoleFromConfig',
'SageMakerClients',
'SageMakerModel',
'S3DataPath',
'S3AutoCreatePath',
'S3Resource',
'EndpointCreationPolicy',
'SageMakerEstimator',
'SageMakerEstimatorBase',
'Option',
'RandomNamePolicy',
'RandomNamePolicyFactory',
'CustomNamePolicy',
'CustomNamePolicyFactory',
'CustomNamePolicyWithTimeStampSuffix',
'CustomNamePolicyWithTimeStampSuffixFactory',
'classpath_jars',
'SageMakerResourceCleanup',
'CreatedResources',
]
|
from transformers import *
import os
import torch
import json
import numpy as np
from parallel_model import MemeDialoGPT
from dataset import MODDataset, get_data
from utils import accuracy_compute, AverageMeter, meme_classify_accuracy
import torch.distributed as dist
# from apex import amp
# from apex.parallel import convert_syncbn_model
# from apex.parallel import DistributedDataParallel
from argparse import ArgumentParser
from torch.utils.data import DataLoader
import random
from utils import get_logger, try_create_dir
import logging
SPECIAL_TOKENS = [
'[BOS]', '[EOS]', '[speaker1]', '[speaker2]', '[IMG]', '[TAG]', '[PAD]'
]
SPECIAL_TOKENS_DICT = {
'bos_token': '[BOS]',
'eos_token': '[EOS]',
'additional_special_tokens':
['[speaker1]', '[speaker2]', '[IMG]', '[TAG]'],
'pad_token': '[PAD]'
}
data_dir = '../../data'
train_data_path = os.path.join(data_dir, 'dialog/train.json')
# train_data_path = 'debug.json'
val_data_path = os.path.join(data_dir, 'dialog/validation.json')
# val_data_path = 'debug.json'
feature_path = os.path.join(data_dir, 'meme/id2feature.json')
# model parameters
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
model_path = 'ckpt/mod_gpt'
# gpt_path = 'ckpt/origin_gpt'
try_create_dir(model_path)
gpt_path = 'ckpt/gpt2-chinese-cluecorpussmall'
ckpt_usage = False
ckpt_path = './ckpt/mod_gpt/epoch_0_loss_10.701'
start_epoch = 0
lr = 6e-5
epochs = 35
gradient_accumulation_steps = 8
print_freq = 100
logger = get_logger(__name__)
logger.info(f"device:{device}")
logger.debug(f"torch version:{torch.__version__}")
def main():
parser = ArgumentParser()
parser.add_argument("--local_rank",
type=int,
default=0,
help='-1 if not distributed')
parser.add_argument("--fp16",
type=int,
default=0,
help='O0, O1, O2, or O3')
args = parser.parse_args()
random.seed(0)
torch.manual_seed(0)
np.random.seed(0)
if args.local_rank != -1:
dist.init_process_group(backend='nccl', init_method='env://')
torch.cuda.set_device(args.local_rank)
map_location = "cuda:" + str(args.local_rank)
# model initialize
if ckpt_usage == True:
tokenizer = BertTokenizer.from_pretrained('ckpt/mod_gpt',
do_lower_case=True)
tokenizer.add_special_tokens(SPECIAL_TOKENS_DICT)
model_config = GPT2Config.from_pretrained('ckpt/mod_gpt')
model = MemeDialoGPT(model_config)
        # important: this changes the length of model.named_parameters() and
        # therefore affects how the optimizer state is loaded
model.tie_weights()
else:
tokenizer = BertTokenizer.from_pretrained(gpt_path, do_lower_case=True)
logger.info(f"vocab len:{len(tokenizer)}")
model = MemeDialoGPT.from_pretrained(gpt_path)
tokenizer.add_special_tokens(SPECIAL_TOKENS_DICT)
model.resize_token_embeddings(len(tokenizer))
logger.info(f"vocab len:{len(tokenizer)}")
    if args.fp16:
        # NOTE: the apex imports near the top of this file are commented out;
        # they must be restored for the fp16 code paths below to work.
        model = convert_syncbn_model(model)
    model = model.to(device)
    # model.eval()
    # The optimizer must exist before amp.initialize() can wrap it.
    optimizer = AdamW(model.parameters(), lr=lr)
    logger.debug('after creating optimizer')
    if args.fp16:
        model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
        model = DistributedDataParallel(model, delay_allreduce=True)
    else:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=True)
            # find_unused_parameters=False)
if ckpt_usage:
ckpt = torch.load(ckpt_path, map_location=map_location)
model.module.load_state_dict(ckpt['model'])
# for name, v in model.named_parameters():
# print(f"{name}, {v.size()}")
# print(len(list(model.named_parameters())))
optimizer.load_state_dict(ckpt['optimizer'])
logger.debug('after creating parallel model')
def display(d, level=0):
for k, v in d.items():
if isinstance(v, dict):
print('*'*level+f'{k}:')
display(v, level+1)
elif isinstance(v, torch.Tensor):
print('*'*level+f'{k}:{v.size()}')
else:
print('*'*level+f'{k}:{v}')
# if ckpt_usage:
# ckpt = torch.load(ckpt_path, map_location=map_location)
# # logger.debug(model.module)
# model.module.load_state_dict(ckpt['model'])
# display(optimizer.state_dict())
# display(ckpt['optimizer'])
# # exit()
# # for k, v in optimizer.state_dict().items():
# # logger.debug(f"{k}:{v}")
# # logger.debug('===state_dict===')
# # for k, v in ckpt['optimizer'].items():
# # logger.debug(f"{k}:{v}")
# # optimizer.load_state_dict(ckpt['optimizer'])
# logger.info('ckpt_usage True, load model and optimizer succ, start epoch:', start_epoch)
# if ckpt_usage == True:
# ckpt_path = 'ckpt/mod_gpt/model.bin'
# ckpt = torch.load(ckpt_path, map_location=map_location)
# model.module.load_state_dict(ckpt['model'])
# data read
logger.debug('before get_data')
train_dialogs, id2feature = get_data(tokenizer, train_data_path,
feature_path)
# print(len(train_dialogs))
val_dialogs, _ = get_data(tokenizer, val_data_path, feature_path)
logger.debug('after get_data')
train_dataset = MODDataset(train_dialogs, id2feature, tokenizer)
val_dataset = MODDataset(val_dialogs, id2feature, tokenizer)
logger.debug('after dataset')
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset)
val_sampler = torch.utils.data.sampler.SequentialSampler(val_dataset)
# val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
train_loader = DataLoader(train_dataset,
batch_size=1,
num_workers=8,
sampler=train_sampler)
val_loader = DataLoader(val_dataset,
batch_size=1,
num_workers=8,
sampler=val_sampler)
logger.info('finish load data')
# for name, v in model.named_parameters():
# print(f"{name}, {v.size()}")
# print(len(list(model.named_parameters())))
for epoch in range(start_epoch, epochs):
# one epoch's training
train_sampler.set_epoch(epoch)
train_loss = train(args=args,
model=model,
tokenizer=tokenizer,
optimizer=optimizer,
dataset=train_loader,
epoch=epoch)
# one epoch's validation
validate(model=model,
tokenizer=tokenizer,
dataset=val_loader,
epoch=epoch)
# break
# torch.distributed.barrier()
# save checkpoint
logger.info(f"epoch:{epoch}, local rank: {args.local_rank}")
if args.local_rank == 0:
# for name, v in model.named_parameters():
# print(f"{name}, {v.size()}")
# print(len(list(model.named_parameters())))
# print(len(list(model.module.named_parameters())))
logger.info(f"epoch:{epoch}, begin to save")
torch.save({'model': model.module.state_dict(), 'optimizer': optimizer.state_dict()},
'%s/epoch_%d_loss_%.3f' % (model_path, epoch, train_loss))
model.module.config.to_json_file(
os.path.join(model_path, 'config.json'))
tokenizer.save_vocabulary(model_path)
logger.info(f"epoch:{epoch}, finish save")
# torch.distributed.barrier()
def train(args, model, tokenizer, optimizer, dataset, epoch):
model.train()
avg_loss = AverageMeter()
avg_img_loss = AverageMeter()
avg_text_loss = AverageMeter()
avg_acc_5 = AverageMeter()
avg_acc_30 = AverageMeter()
avg_acc_90 = AverageMeter()
iteration = 0
# cat_img_features = img_feature_read(feature_path)
meme_correct_num = 1
meme_total_num = 1
for instance in dataset:
history_txt, history_img, token_type_ids, labels, meme_flag, id_labels = instance
history_txt, history_img, token_type_ids, labels, meme_flag, id_labels = history_txt.to(device).squeeze(0), history_img.to(device).squeeze(0), \
token_type_ids.to(device).squeeze(0), labels.to(device).squeeze(
0), meme_flag.to(device).squeeze(0), id_labels.to(device).squeeze(0)
history_txt_embs = model.module.transformer.wte(history_txt)
# print(history_txt_embs.size())
history_img_embs = model.module.img_ff(history_img)
# print(history_img_embs.size())
# print(token_type_ids)
# print(history_txt)
input_embs = input_construct(history_txt_embs, history_img_embs,
token_type_ids, tokenizer)
input_embs = input_embs.to(device)
if input_embs.size(-2) > 450:
input_embs = input_embs[-450:, :]
token_type_ids = token_type_ids[-450:]
labels = token_type_ids[-449:]
# continue
img_feature = history_img[-1, :].unsqueeze(0)
# logger.debug(f"{input_embs.size()}, {token_type_ids.size()}, {labels.size()}, {img_feature.size()}, {meme_flag.size()}")
# print(input_embs.size())
# print(img_feature.size())
loss, img_loss, text_loss = model(input_embs, token_type_ids, id_labels, labels, img_feature,
meme_flag)
logits = model.module.logits
if args.fp16:
with amp.scale_loss(loss, optimizer) as scale_loss:
scale_loss.backward()
else:
loss.backward()
if iteration % gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer),
1.0)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
optimizer.zero_grad()
# if img_feature[0][0] != 0.:
# if meme_retrieval_compute(cur_img_feature, img_feature, cat_img_features):
# meme_correct_num += 1
# meme_total_num += 1
#acc = accuracy_compute(lm_logits, labels, 5)
# avg_acc.update(acc)
if id_labels.numel() > 0:
acc_5, acc_30, acc_90 = acc_compute(logits, id_labels)
avg_acc_5.update(acc_5)
avg_acc_30.update(acc_30)
avg_acc_90.update(acc_90)
avg_loss.update(loss.item())
if img_loss.item() > 0:
assert id_labels.numel() > 0
avg_img_loss.update(img_loss.item())
avg_text_loss.update(text_loss.item())
# print status
if iteration % print_freq == 0:
print('Epoch:[{0}][{1}/{2}]\t'
'Loss {loss.avg:.4f} Image Loss {img_loss.avg:.4f} Text Loss {text_loss.avg:.4f}\t'
'Retrieval Acc {acc_5.avg:.3f} | {acc_30.avg:.3f} | {acc_90.avg:.3f}'.format(epoch, iteration, len(dataset),
loss=avg_loss, img_loss=avg_img_loss, text_loss=avg_text_loss, acc_5=avg_acc_5, acc_30=avg_acc_30, acc_90=avg_acc_90))
iteration += 1
# logger.info(f"iteration:{iteration}, local rank: {args.local_rank}")
# print(loss)
# break
return avg_loss.avg
def acc_compute(logits, labels):
_, idx = torch.sort(logits.squeeze(0))
idx = idx.tolist()
labels = labels.item()
return int(labels in idx[-5:]), int(labels in idx[-30:]), int(labels in idx[-90:])
# concatenate the input
def input_construct(history_txt_embs, history_img_embs, token_type_ids,
tokenizer):
bos, eos, speaker1, speaker2, img, tag = tokenizer.convert_tokens_to_ids(
SPECIAL_TOKENS[:-1])
emb_length = token_type_ids.size(-1)
emb_dim = history_txt_embs.size(-1)
img_num = history_img_embs.size(0)
input_embs = torch.zeros((emb_length, emb_dim))
txt_idx = 0
img_idx = 0
left_idx = 0
right_idx = 0
while right_idx < emb_length:
# if right_idx == emb_length-1 and token_type_ids[right_idx] == img:
# break
if right_idx < emb_length - 1 and token_type_ids[right_idx] == img:
txt_length = right_idx - left_idx
input_embs[left_idx:right_idx, :] = history_txt_embs[
txt_idx:txt_idx + txt_length, :]
txt_idx += txt_length
input_embs[right_idx, :] = history_img_embs[img_idx, :]
img_idx += 1
left_idx = right_idx + 1
right_idx += 1
txt_length = right_idx - left_idx
if txt_length > 0:
input_embs[left_idx:right_idx, :] = history_txt_embs[txt_idx:, :]
# img_feature = history_img_embs[img_idx,:]
return input_embs
def validate(model, tokenizer, dataset, epoch):
model.eval()
avg_loss = AverageMeter()
avg_img_loss = AverageMeter()
avg_text_loss = AverageMeter()
avg_acc_5 = AverageMeter()
avg_acc_30 = AverageMeter()
avg_acc_90 = AverageMeter()
avg_bleu = AverageMeter()
iteration = 1
cat_img_features = img_feature_read(feature_path)
meme_correct_num = 0
meme_total_num = 0
with torch.no_grad():
for instance in dataset:
history_txt, history_img, token_type_ids, labels, meme_flag, id_labels = instance
history_txt, history_img, token_type_ids, labels, meme_flag, id_labels = history_txt.to(device).squeeze(0), history_img.to(device).squeeze(0), \
token_type_ids.to(device).squeeze(0), labels.to(device).squeeze(
0), meme_flag.to(device).squeeze(0), id_labels.to(device).squeeze(0)
history_txt_embs = model.module.transformer.wte(history_txt)
history_img_embs = model.module.img_ff(history_img)
input_embs = input_construct(history_txt_embs, history_img_embs,
token_type_ids, tokenizer)
input_embs = input_embs.to(device)
if input_embs.size(-2) > 450:
continue
img_feature = history_img[-1, :].unsqueeze(0)
loss, img_loss, text_loss = model(input_embs, token_type_ids, id_labels, labels, img_feature,
meme_flag)
logits = model.module.logits
if id_labels.numel() > 0:
acc_5, acc_30, acc_90 = acc_compute(logits, id_labels)
avg_acc_5.update(acc_5)
avg_acc_30.update(acc_30)
avg_acc_90.update(acc_90)
avg_loss.update(loss.item())
if img_loss.item() > 0:
avg_img_loss.update(img_loss.item())
avg_text_loss.update(text_loss.item())
# print status
if iteration % print_freq == 0:
print('Epoch:[{0}][{1}/{2}]\t'
'Loss {loss.avg:.4f} Image Loss {img_loss.avg:.4f} Text Loss {text_loss.avg:.4f}\t'
'Retrieval Acc {acc_5.avg:.3f} | {acc_30.avg:.3f} | {acc_90.avg:.3f}'.format(epoch, iteration, len(dataset),
loss=avg_loss, img_loss=avg_img_loss, text_loss=avg_text_loss, acc_5=avg_acc_5, acc_30=avg_acc_30, acc_90=avg_acc_90))
iteration += 1
# loss, mf_logits, lm_logits, cur_img_feature = model(
# input_embs, token_type_ids, labels, img_feature, meme_flag,
# 'val')
# # compare cur_img_feature is among topk with img_feature
# # print(cur_img_feature.size())
# if img_feature[0][0] != 0.:
# if meme_retrieval_compute(cur_img_feature, img_feature,
# cat_img_features):
# meme_correct_num += 1
# meme_total_num += 1
# #acc = accuracy_compute(lm_logits, labels, k=5)
# acc = meme_classify_accuracy(mf_logits, meme_flag).item()
# avg_acc.update(acc)
# avg_loss.update(loss.item())
# if iteration % print_freq == 0:
# print('Epoch:[{0}][{1}/{2}]\t'
# 'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
# 'Meme Classification {acc.val:.3f} ({acc.avg:.3f})\t'
# 'Meme Retrieval {mac:.3f}'.format(
# epoch,
# iteration,
# len(dataset),
# loss=avg_loss,
# acc=avg_acc,
# mac=float(meme_correct_num / meme_total_num)))
# iteration += 1
# break
logger.info(
f"validate epoch {epoch} end, Loss {avg_loss.avg}, Meme Retrieval {avg_acc_5.avg} | {avg_acc_30.avg} | {avg_acc_90.avg}"
)
def img_feature_read(feature_path):
with open(feature_path, 'r', encoding='utf-8') as f:
id2feature_dict = json.load(f)
img_features = []
for id in id2feature_dict.keys():
img_features.append(id2feature_dict[id])
img_features = np.array(img_features)
img_features = torch.from_numpy(img_features).float().to(device)
return img_features
def meme_retrieval_compute(cur_img_feature, target_img_feature,
cat_img_features):
# (1, 512)
cur_dist = torch.dist(cur_img_feature, target_img_feature, p=2)
# print(cat_img_features.size())
cur_img_list = cur_img_feature.repeat(307, 1)
total_dist = torch.sqrt(
torch.sum((cur_img_list - cat_img_features)**2, dim=1))
# print(total_dist)
sorted_total, _ = torch.sort(total_dist)
# print(sorted_total)
return torch.gt(sorted_total[30], cur_dist)
# print(cur_dist)
if __name__ == '__main__':
main()
|
from snakeoil.demandload import demand_compile_regexp
from snakeoil.strings import pluralism as _pl
from .. import results, sources
from . import Check
demand_compile_regexp('indent_regexp', '^\t* \t+')
class _Whitespace(results.VersionResult, results.Warning):
@property
def lines_str(self):
return f"line{_pl(self.lines)}: {', '.join(str(x) for x in self.lines)}"
class WhitespaceFound(_Whitespace):
"""Leading or trailing whitespace found."""
def __init__(self, leadtrail, lines, **kwargs):
super().__init__(**kwargs)
self.lines = tuple(lines)
self.leadtrail = leadtrail
@property
def desc(self):
return f"ebuild has {self.leadtrail} whitespace on {self.lines_str}"
class WrongIndentFound(_Whitespace):
"""Incorrect indentation whitespace found."""
def __init__(self, lines, **kwargs):
super().__init__(**kwargs)
self.lines = tuple(lines)
@property
def desc(self):
return f"ebuild has whitespace in indentation on {self.lines_str}"
class DoubleEmptyLine(_Whitespace):
"""Unneeded blank lines found."""
def __init__(self, lines, **kwargs):
super().__init__(**kwargs)
self.lines = tuple(lines)
@property
def desc(self):
return f"ebuild has unneeded empty {self.lines_str}"
class TrailingEmptyLine(results.VersionResult, results.Warning):
"""Unneeded trailing blank lines found."""
desc = "ebuild has trailing blank line(s)"
class NoFinalNewline(results.VersionResult, results.Warning):
"""Ebuild's last line does not have a final newline."""
desc = "ebuild lacks an ending newline"
class WhitespaceCheck(Check):
"""Scan ebuild for useless whitespace."""
_source = sources.EbuildFileRepoSource
known_results = frozenset([
WhitespaceFound, WrongIndentFound, DoubleEmptyLine,
TrailingEmptyLine, NoFinalNewline,
])
def feed(self, pkg):
lastlineempty = False
trailing = []
leading = []
indent = []
double_empty = []
for lineno, line in enumerate(pkg.lines, 1):
if line != '\n':
lastlineempty = False
if line[-2:-1] == ' ' or line[-2:-1] == '\t':
trailing.append(lineno)
elif line[0] == ' ':
leading.append(lineno)
if indent_regexp.match(line):
indent.append(lineno)
elif lastlineempty:
double_empty.append(lineno)
else:
lastlineempty = True
if trailing:
yield WhitespaceFound('trailing', trailing, pkg=pkg)
if leading:
yield WhitespaceFound('leading', leading, pkg=pkg)
if indent:
yield WrongIndentFound(indent, pkg=pkg)
if double_empty:
yield DoubleEmptyLine(double_empty, pkg=pkg)
if lastlineempty:
yield TrailingEmptyLine(pkg=pkg)
# Dealing with empty ebuilds is just paranoia
if pkg.lines and not pkg.lines[-1].endswith('\n'):
yield NoFinalNewline(pkg=pkg)
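# Illustrative sketch (hypothetical input, not from the source): for an ebuild
# whose line 3 ends in a space, whose line 7 starts with a space, and which
# ends in two blank lines, feed() above would yield
# WhitespaceFound('trailing', [3]), WhitespaceFound('leading', [7]),
# DoubleEmptyLine([<last line number>]) and TrailingEmptyLine, all tied to the
# package via pkg=pkg.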
|
import sentry_sdk
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.flask import FlaskIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration
from extensions import celery
from app import create_app
import config as c
app = create_app(for_celery=True)
app.app_context().push()
# add more external integrations below
if c.CELERY_SENTRY_DSN:
sentry_sdk.init(
c.CELERY_SENTRY_DSN,
integrations=[
CeleryIntegration(),
FlaskIntegration(),
RedisIntegration(),
SqlalchemyIntegration(),
],
)
|
# Copyright 2020 OpenRCA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import time
import cotyledon
from orca import exceptions
from orca.common import config, logger
from orca.graph import graph
from orca.topology import linker, upstream
CONFIG = config.CONFIG
LOG = logger.get_logger(__name__)
class ProbeRunner(cotyledon.Service):
"""Runs entity probe from given probe bundle."""
def __init__(self, worker_id, probe_bundle, graph_lock):
super().__init__(worker_id)
self._worker_id = worker_id
self._probe_bundle = probe_bundle
self._graph_lock = graph_lock
self.__graph = None
@property
def _graph(self):
if not self.__graph:
self.__graph = graph.Graph.get(self._graph_lock)
return self.__graph
def run(self):
probe = self._initialize_probe()
linkers = self._initialize_linkers()
self._setup_event_dispatcher(linkers)
probe.run()
def _initialize_probe(self):
probe_module = self._probe_bundle.probe
return probe_module.get(self._graph)
def _initialize_linkers(self):
linkers = []
linker_modules = self._probe_bundle.linkers
for linker_module in linker_modules:
linkers.append(linker_module.get(self._graph))
return linkers
def _setup_event_dispatcher(self, linkers):
event_dispatcher = linker.EventDispatcher()
for linker_instance in linkers:
event_dispatcher.add_linker(linker_instance)
self._graph.add_listener(event_dispatcher)
class Probe(abc.ABC):
"""Base class for entity probes."""
def __init__(self, graph):
super().__init__()
self._graph = graph
@abc.abstractmethod
def run(self):
"""Starts entity probe."""
@classmethod
def get(cls, graph):
return cls(graph)
class PullProbe(Probe):
"""Periodically pulls all entities from the upstream into the graph."""
def __init__(self, graph, upstream_proxy, extractor, synchronizer, resync_period=60):
super().__init__(graph)
self._upstream_proxy = upstream_proxy
self._extractor = extractor
self._synchronizer = synchronizer
self._resync_period = resync_period
def run(self):
extended_kind = self._extractor.get_extended_kind()
while True:
LOG.info("Starting sync for entity: %s", extended_kind)
start_time = time.time()
self._synchronize()
sync_time = time.time() - start_time
LOG.info("Finished sync for entity: %s (%.2f seconds)", extended_kind, sync_time)
time.sleep(self._resync_period)
def _synchronize(self):
nodes_in_graph = self._get_nodes_in_graph()
upstream_nodes = self._get_upstream_nodes()
self._synchronizer.synchronize(nodes_in_graph, upstream_nodes)
def _get_nodes_in_graph(self):
properties = {'origin': self._extractor.origin, 'kind': self._extractor.kind}
return self._graph.get_nodes(properties=properties)
def _get_upstream_nodes(self):
entities = self._upstream_proxy.get_all()
upstream_nodes = []
for entity in entities:
try:
node = self._extractor.extract(entity)
upstream_nodes.append(node)
except exceptions.OrcaError as ex:
LOG.warning("Error while processing an entity: %s", ex)
return upstream_nodes
class PushProbe(Probe, upstream.EventHandler):
"""Consumes events pushed by the upstream."""
def __init__(self, graph, upstream_proxy, extractor):
super().__init__(graph)
self._upstream_proxy = upstream_proxy
self._extractor = extractor
def run(self):
extended_kind = self._extractor.get_extended_kind()
LOG.info("Consuming events for entity: %s", extended_kind)
self._upstream_proxy.get_events(handler=self)
def on_added(self, entity):
node = self._extractor.extract(entity)
self._graph.add_node(node)
def on_updated(self, entity):
node = self._extractor.extract(entity)
self._graph.update_node(node)
def on_deleted(self, entity):
node = self._extractor.extract(entity)
self._graph.delete_node(node.id)
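# Minimal sketch of a concrete probe (hypothetical, not part of OpenRCA): a
# subclass only needs to implement run(). This one just logs a heartbeat
# instead of extracting real entities, using only names already imported above.
#
#     class HeartbeatProbe(Probe):
#         """Logs a heartbeat periodically instead of syncing entities."""
#
#         def run(self):
#             while True:
#                 LOG.info("heartbeat probe tick")
#                 time.sleep(60)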
|
from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.contrib.auth.decorators import login_required
# Create your views here.
from .models import *
from .forms import CreateUserForm
from .forms import *
def home_view(request):
context={}
return render(request, 'web/main.html', context)
@login_required(login_url='sign_view')
def profile_view(request):
context={}
return render(request, 'web/profile.html', context)
def connections_view(request):
context={}
return render(request, 'web/connections.html', context)
def events_view(request):
context={}
return render(request, 'web/events.html', context)
def jobs_view(request):
context={}
return render(request, 'web/jobs.html', context)
def contact_view(request):
context={}
return render(request, 'web/contact.html', context)
def sign_view(request):
if request.user.is_authenticated:
return redirect('home_view')
else:
if request.method == 'POST':
username = request.POST.get('username')
password =request.POST.get('password')
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('home_view')
else:
messages.info(request, 'Username OR password is incorrect')
context = {}
return render(request, 'web/signin.html', context)
def register_view(request):
if request.user.is_authenticated:
return redirect('home_view')
else:
form = CreateUserForm()
if request.method == 'POST':
form = CreateUserForm(request.POST)
if form.is_valid():
form.save()
user = form.cleaned_data.get('username')
messages.success(request, 'Account was created for ' + user)
return redirect('sign_view')
context = {'form':form}
return render(request, 'web/register.html', context)
def logoutUser(request):
logout(request)
return redirect('home_view')
def team_view(request):
context={}
return render(request, 'web/team.html', context)
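# Hypothetical sketch (not from the original project): these views are wired up
# in a urls.py using the names that the redirects above rely on
# ('home_view', 'sign_view', ...). The URL paths below are assumptions for
# illustration only.
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('', views.home_view, name='home_view'),
#         path('signin/', views.sign_view, name='sign_view'),
#         path('register/', views.register_view, name='register_view'),
#         path('logout/', views.logoutUser, name='logout'),
#         path('profile/', views.profile_view, name='profile_view'),
#     ]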
def profile_form(request):
    form = ProfileForm(request.POST or None)
    if form.is_valid():
        fs = form.save(commit=False)
        fs.user = request.user
        fs.save()
        form = ProfileForm()
context={
'form':form
}
return render(request, 'web/profile_form.html', context) |
import os
import subprocess
import pandas as pd
from utils.combine import merge_by_subject
from utils.save_data import write_csv
def add_log_k(file_trial_input, file_subjects_to_merge, path):
path_matlab_fit_k = os.path.join('data_prep', 'add_variables', 'fit_k')
path_input = os.path.join(path, file_trial_input)
path_to_merge = os.path.join(path, file_subjects_to_merge)
# noinspection SpellCheckingInspection
    run_matlab = (
        f"""matlab -wait -nojvm -nosplash -nodesktop -r "fit_discount_k("""
        f"""'{path_input}', '{path}'); exit"""
    )
print(f"""Fitting discounting parameter log(k) in Matlab. \n"""
f"""Run Matlab from console: \n"""
f"""{run_matlab} \n""")
os.chdir(path_matlab_fit_k)
subprocess.run(run_matlab, shell=True, check=True)
os.chdir(os.path.join('../..', '..'))
root = "C:/Users/User/GitHub/WebET_Analysis"
log_k = pd.read_csv(os.path.join(path, 'log_k.csv'))
print('Imported data from ' + path_to_merge + ':')
data_subject = pd.read_csv(path_to_merge)
data_subject = merge_by_subject(data_subject, log_k, 'logK', 'noise')
missing_values = data_subject.loc[
pd.isna(data_subject['logK']),
['run_id', 'prolificID', 'choice_rt', 'choseLL', 'choseTop', 'logK',
'noise']]
if len(missing_values) > 0:
print(f"""n={len(data_subject)} participants. """
f"""{len(missing_values)} missing logK values. \n"""
f"""{missing_values}""")
write_csv(data=missing_values, file_name='missing_log_k.csv',
path=path)
else:
print('All participants could be fitted to hyperbolic discounting')
print('Data saved to ' + path_to_merge + ':')
data_subject.to_csv(os.path.join(path_to_merge), index=False, header=True)
return data_subject |
import pandas as pd
import numpy as np
def email(name, receiver, file, cc, password):
    import smtplib
    import ssl
    from email import encoders
    from email.mime.base import MIMEBase
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
subject = "Automated Test Mail - Python"
body = 'Hi {}, \nThis is an automated mail'.format(name)
sender_email = "[email protected]" #enter your email here
receiver_email = receiver
cc = cc
password = password
df = pd.read_csv(file)
df_html =df.to_html(index=False)
df_part = MIMEText(df_html,'html')
# Create a multipart message and set headers
message = MIMEMultipart()
message["From"] = sender_email
message["To"] = ','.join(receiver_email)
message["Subject"] = subject
message["Cc"] = ','.join(cc) # Recommended for mass emails
# Add body to email
message.attach(MIMEText(body, "plain"))
message.attach(df_part)
filename = file # In same directory as script
# Open csv file in binary mode
with open(filename, "rb") as attachment:
# Add file as application/octet-stream
# Email client can usually download this automatically as attachment
part = MIMEBase("application", "octet-stream")
part.set_payload(attachment.read())
# Encode file in ASCII characters to send by email
encoders.encode_base64(part)
# Add header as key/value pair to attachment part
part.add_header(
"Content-Disposition",
f"attachment; filename= {filename}",
)
# Add attachment to message and convert message to string
message.attach(part)
text = message.as_string()
# Log in to server using secure context and send email
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email+cc, text)
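# Illustrative sketch of the expected mailer_list.csv layout (an assumption
# inferred from the columns used below: 'name', 'email', 'cc', 'file'; the
# space-separated addresses match the .split() calls in the loop). Values are
# placeholders only.
#
#     name,email,cc,file
#     Alice,alice@example.com,boss@example.com lead@example.com,report_a.csv
#     Bob,bob@example.com,boss@example.com,report_b.csv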
mailer_list = pd.read_csv('mailer_list.csv')
password = input('Input password here')
for i in range(len(mailer_list)):
list1 = mailer_list.loc[i]['cc'].split()
list2 = mailer_list.loc[i]['email'].split()
email(name = mailer_list.loc[i]['name'],receiver = list2 ,file = mailer_list.loc[i]['file'],cc=list1,password = password)
print('E-mail sent to {} with following {} as cc with attachment name {}'.format(list2,list1,mailer_list.loc[i]['file']))
|
import os
import sys
import numbers
from ..cosmology import get_ccl_cosmology, RESERVED_CCL_PARAMS
from ..loglike import compute_loglike
from ..parser_constants import FIRECROWN_RESERVED_NAMES
import numpy as np
import cosmosis
# these keys are ignored by cosmosis
RESERVED_NAMES_COSMOSIS = FIRECROWN_RESERVED_NAMES + ['priors']
def run_cosmosis(config, data, output_dir):
"""Run CosmoSIS on the problem.
    This requires the following parameters in the 'cosmosis' section
    of the config:
sampler - name of sampler to use, e.g. emcee, multinest, grid, ...
output - name of file to save to
a section with the same name as the sampler, selecting options
for that sampler.
Parameters
----------
config : dict
Configuration info, usually read directly from the YAML file
data : dict
The result of calling `firecrown.config.parse` on an input YAML
config.
output_dir : pathlib.Path
Directory in which to put output.
"""
# Extract the bits of the config file that
# cosmosis wants
ini = _make_cosmosis_params(config, output_dir)
values = _make_cosmosis_values(config)
pool = _make_parallel_pool(config)
priors = _make_cosmosis_priors(config)
pipeline = _make_cosmosis_pipeline(data, ini, values, priors, pool)
# Actually run the thing
cosmosis.main.run_cosmosis(None, pool=pool, ini=ini,
pipeline=pipeline, values=values)
if pool is not None:
pool.close()
def _make_parallel_pool(config):
"""Set up a parallel process pool.
Will look for the 'mpi' key in the config cosmosis section.
Parameters
----------
config: dict
The data object parse'd from an input yaml file.
This is passed as-is to the likelihood function
Returns
-------
pool : CosmoSIS MPIPool object
parallel process pool
"""
cosmosis_config = config['cosmosis']
# There is a reason to make the user actively
# request to use MPI rather than just checking -
# on many systems, including, importantly, NERSC,
# trying to import MPI when not running under the
# MPI environment will cause a crash
use_mpi = cosmosis_config.get('mpi', False)
if use_mpi:
pool = cosmosis.MPIPool()
if pool.size == 1:
print("Have mpi=True, but only running a single process.")
print("I will ignore and run in serial mode.")
pool = None
else:
pool = None
print("Running in serial mode (one process).")
return pool
def _make_cosmosis_pipeline(data, ini, values, priors, pool):
""" Build a CosmoSIS pipeline.
Parameters
----------
data : dict
The data object parse'd from an input yaml file.
This is passed as-is to the likelihood function
ini : Inifile
Cosmosis object representing the main input parameter file
values : Inifile
Cosmosis object representing the input parameter values
pool : MPIPool or None
If using MPI parallelism, a CosmoSIS pool object.
Returns
-------
pipeline : CosmoSIS pipeline objects
Instantiated pipeline ready to run.
"""
# Lie to CosmoSIS about where it is installed.
os.environ['COSMOSIS_SRC_DIR'] = '.'
# Build the pipeline that evaluates the likelihood.
# We avoid printing various bits of output info by silencing stdout on
# worker nodes.
if (pool is None) or pool.is_master():
pipeline = cosmosis.LikelihoodPipeline(ini, load=False, values=values,
priors=priors)
else:
with cosmosis.stdout_redirected():
pipeline = cosmosis.LikelihoodPipeline(ini, load=False, values=values,
priors=priors)
# Flush now to print out the master node's setup stdout
# before printing the worker likelihoods
sys.stdout.flush()
# Set up a single cosmosis module, from the functions directly
module = cosmosis.FunctionModule('firecrown', _setup, _execute)
module.setup_functions((data, ini))
pipeline.modules = [module]
return pipeline
def _make_cosmosis_params(config, output_dir):
"""Extract a cosmosis configuration object from a config dict
Parameters
----------
config : dict
The data object parse'd from an input yaml file.
This is passed as-is to the likelihood function
output_dir : pathlib.Path
Directory to put output into.
Returns
-------
cosmosis_params : Inifile
object to use to build cosmosis pipeline
"""
cosmosis_config = config['cosmosis']
# Some general options
sampler_names = cosmosis_config['sampler']
output_file = str(output_dir / 'chain.txt')
debug = cosmosis_config.get('debug', False)
quiet = cosmosis_config.get('quiet', False)
root = "" # Dummy value to stop cosmosis complaining
# Make into a pair dictionary with the right cosmosis sections
cosmosis_options = {
("runtime", "root"): root,
("runtime", "sampler"): sampler_names,
("output", "filename"): output_file,
("pipeline", "debug"): str(debug),
("pipeline", "quiet"): str(quiet),
}
# Set all the sampler configuration options from the
# appropriate section of the cosmosis_config (e.g., the "grid"
# section if using the grid sampler, etc.)
for sampler_name in sampler_names.split():
sampler_config = cosmosis_config.get(sampler_name, {})
for key, val in sampler_config.items():
cosmosis_options[sampler_name, key] = str(val)
# Override options that involve the user-specified
# output paths to put everything in the one directory
overridden_options = [
('maxlike', 'output_ini', 'output.ini'),
('maxlike', 'output_cov', 'covmat.txt'),
('multinest', 'multinest_outfile_root', 'multinest'),
('gridmax', 'output_ini', 'maxlike.ini'),
('minuit', 'output_ini', 'maxlike.ini'),
('minuit', 'save_cov', 'covmat.txt'),
('pmaxlike', 'output_ini', 'maxlike.ini'),
('pmaxlike', 'output_covmat', 'covmat.txt'),
('polychord', 'polychord_outfile_root', 'polychord'),
('polychord', 'base_dir', ''),
]
# Apply these overrides
for section, key, value in overridden_options:
# To avoid too much noise in headers, only
# copy over sections for samplers we're actually
# using
if section not in sampler_names:
continue
full_value = output_dir / value
# Only warn user if they tried to set this already
if (section, key) in cosmosis_options:
sys.stderr.write(f"NOTE: Overriding option {section}/{key}"
f" to {full_value}")
cosmosis_options[section, key] = str(full_value)
# These options are not enabled by default, because they can
# produce large output files. So we only override them if
# they are already set
optional_overrides = [
('aprior', 'save', 'save'),
('grid', 'save', 'save'),
('list', 'save', 'save'),
('minuit', 'save_dir', 'save'),
('star', 'save', 'save'),
]
# Apply these overrides
for section, key, value in optional_overrides:
# To avoid too much noise in headers, only
# copy over sections for samplers we're actually
# using
if section not in sampler_names:
continue
# Only override the option if it is already set
if (section, key) in cosmosis_options:
full_value = output_dir / value
# Still warn the user
sys.stderr.write(f"NOTE: Overriding option {section}/{key}"
f" to {full_value}")
cosmosis_options[section, key] = str(full_value)
# The string parameters in the yaml file parameters
# can't go into cosmosis values, because that is for parameters
# that might vary during a run, which string params will not.
# Instead we put these in the parameter file
for p, v in config['parameters'].items():
if isinstance(v, str):
cosmosis_options['firecrown', p] = v
# Convert into cosmosis Inifile format.
cosmosis_params = cosmosis.Inifile(None, override=cosmosis_options)
return cosmosis_params
def _make_cosmosis_values(config):
"""Extract a cosmosis values object from a config dict
Parameters
----------
config : dict
The data object parse'd from an input yaml file.
This is passed as-is to the likelihood function
Returns
-------
cosmosis_values : Inifile
Object to use to build cosmosis parameter ranges/values.
"""
params = config['parameters']
varied_params = config['cosmosis']['parameters']
# copy all the parameters into the cosmosis config structure
values = {}
# First set all the numeric parameters, fixed and varied.
# We will override the varied ones in a moment
for p, v in params.items():
if isinstance(v, numbers.Number):
values['params', p] = str(v)
# Now override the varied parameters
for p, v in varied_params.items():
v = ' '.join(str(x) for x in v)
values['params', p] = v
return cosmosis.Inifile(None, override=values)
def _make_cosmosis_priors(config):
"""Make a cosmosis priors ini file.
Parameters
----------
config : dict
The data object parse'd from an input yaml file.
This is passed as-is to the likelihood function
Returns
-------
priors : cosmosis Inifile
The cosmosis config object specifying priors.
"""
# Early return if no priors section is specified
if 'priors' not in config:
return cosmosis.Inifile(None)
P = {}
for name, p in config['priors'].items():
        # FireCrown exposes any scipy distribution as a prior.
# CosmoSIS only exposes three of these right now (plus
# a couple of others that scipy doesn't support), but
# these are by far the most common.
# This is a key used by other FireCrown tools
if name == 'module':
continue
        # The prior is specified by its kind (a scipy distribution name), loc and scale.
kind = p['kind']
loc = p['loc']
scale = p['scale']
# Flat
if kind == 'uniform':
upper = loc + scale
pr = f'uniform {loc} {upper}'
# Exponential, only with loc = 0
elif kind == 'expon':
# This is not currently in CosmoSIS. It's not hard to add,
# and if there is demand Joe can add it.
if loc != 0:
raise ValueError("CosmoSIS does not currently support exponential "
"priors with non-zero 'loc'. If you need this please "
"open an issue")
pr = f'exp {scale}'
# Gaussian.
elif kind == 'norm':
pr = f'norm {loc} {scale}'
else:
raise ValueError(f"CosmoSIS does not know how to use the prior kind {kind}")
# Put these all in a dictionary
P['params', name] = pr
return cosmosis.Inifile(None, override=P)
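# Illustrative sketch (an assumption based on the mapping above): a YAML block
# such as
#
#     priors:
#       Omega_c:
#         kind: norm
#         loc: 0.27
#         scale: 0.02
#
# becomes the CosmoSIS prior line "norm 0.27 0.02" stored under
# ('params', 'Omega_c'), while kind 'uniform' maps to "uniform loc (loc + scale)"
# and kind 'expon' (loc must be 0) maps to "exp scale".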
def _setup(data_ini):
# Most CosmoSIS modules do proper setup here.
    # We don't need anything so just return.
return data_ini
def _execute(block, config):
data, ini = config
# Calculate the firecrown likelihood as a module
# This function, which isn't designed for end users,
# is the main connection between cosmosis and firecrown.
# CosmoSIS builds the block, and passes it to us here.
# The block contains all the sample parameters.
# Create CCL cosmology
ccl_values = {}
for p in RESERVED_CCL_PARAMS:
# First look in the block
if block.has_value('params', p):
ccl_values[p] = block['params', p]
# Then in the ini file, for string params
elif ini.has_option('firecrown', p):
ccl_values[p] = ini.get('firecrown', p)
cosmo = get_ccl_cosmology(ccl_values)
# Put all the parameters in the data dictionary,
# both CCL-related and others, like nuisance params.
all_params = data['parameters'].keys()
for p in all_params:
# string parameters are excluded here, and potentially others
if block.has_value('params', p):
data['parameters'][p] = block['params', p]
# Currently compute_loglike actually computes the posterior
# if priors are included. Prevent that from happening since
# CosmoSIS is already handling priors
if 'priors' in data:
data = data.copy()
del data['priors']
# Call out to the log likelihood
loglikes, obs, theory, covs, invs, stats = compute_loglike(cosmo=cosmo, data=data)
loglike = np.sum([v for v in loglikes.values() if v is not None])
# For Fisher, etc., we save all the data vector info that we have
for name in loglikes:
# skip some stuff
if name in RESERVED_NAMES_COSMOSIS:
continue
# Send result back to cosmosis
block['likelihoods', f'{name}_like'] = loglikes[name]
# Save whatever we have managed to collect.
# The CosmoSIS Fisher sampler and others look in this
# section to build up the Fisher data vectors.
if theory[name] is not None:
block['data_vector', f'{name}_theory'] = theory[name]
if obs[name] is not None:
block['data_vector', f'{name}_data'] = obs[name]
if covs[name] is not None:
block['data_vector', f'{name}_covariance'] = covs[name]
if invs[name] is not None:
block['data_vector', f'{name}_inverse_covariance'] = invs[name]
# Unless in quiet mode, print out what we have done
if not data['cosmosis'].get("quiet", True):
print("params = {}".format(data['parameters']))
print(f"loglike = {loglike}\n", flush=True)
# Signal success. An exception anywhere above will
# be converted to a -inf likelihood by default.
return 0
|
#XXX: for a clean exit we need to import KDT first because it initializes MPI
# in any case
import kdt
import numpy
import scipy
import unittest
from mpi4py import MPI
from skylark import io
import elem
class IO_test(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(IO_test, self).__init__(*args, **kwargs)
self.ele_A = elem.DistMatrix_d()
elem.Uniform(self.ele_A, 10, 30)
self.rank = MPI.COMM_WORLD.Get_rank()
self.size = MPI.COMM_WORLD.Get_size()
if self.rank == 0:
self.np_A = numpy.random.random((20, 65))
self.sp_A = scipy.sparse.rand(20, 65, density=0.2, format='csr')
def compareMatrixNorm(self, A, B):
# Gather at root
A_CIRC_CIRC = elem.DistMatrix_d_CIRC_CIRC()
elem.Copy(A, A_CIRC_CIRC)
# Compare Frobenius norm
#FIXME: some tests fail if we use higher accuracy -- why?
if self.rank == 0:
self.assertAlmostEqual(
numpy.linalg.norm(A_CIRC_CIRC.Matrix[:], ord='fro'),
numpy.linalg.norm(B, ord='fro'), 5)
def test_mm(self):
matrix_fpath = 'test_matrix.mtx'
store = io.mtx(matrix_fpath)
# root writes its scipy-sparse matrix...
if self.rank == 0:
store.write(self.sp_A)
MPI.COMM_WORLD.barrier()
# ... all processes read back what root has written
B = store.read('combblas-sparse')
C = store.read('scipy-sparse')
D = store.read('numpy-dense')
# convert CombBLAS matrix to (coo) sparse matrix
col, row, data = B.toVec()
sp_cb = scipy.sparse.coo_matrix((data, (row, col)), shape=(20, 65))
# all processes check
if self.rank == 0:
self.assertTrue(((self.sp_A - sp_cb).todense() < 1e-7).all())
self.assertTrue((sp_cb.todense() - D < 1e-7).all())
# only root checks; owns matrix generated and subsequently written
if self.rank == 0:
self.assertTrue(((self.sp_A - C).todense() < 1e-7).all())
#XXX: needs patched KDT.
#FIXME: still crashes. Iterators are still interchanged.
#SpParMat<long, doubleint, SpDCCols<long, doubleint>
#>::SaveGathered<SpParMat<long, doubleint, SpDCCols<long, doubleint>
#>>::ScalarReadSaveHandler> (this=this@entry=0x1cb6e70, filename=...,
#handler=handler@entry=..., transpose=transpose@entry=false)
# at kdt/pyCombBLAS/../../CombBLAS/SpParMat.cpp:1521
# 1521 csr[nzit.rowid()].push_back( make_pair(colit.colid(),
# nzit.value()) ); )
#store.write(B)
#B = store.read('combblas-sparse')
#C = store.read('scipy-sparse')
#D = store.read('numpy-dense')
# convert CombBLAS matrix to (coo) sparse matrix
#col, row, data = B.toVec()
#sp_cb = scipy.sparse.coo_matrix((data, (row, col)), shape=(20, 65))
#self.assertTrue(((self.sp_A - sp_cb).todense() < 1e-7).all())
#self.assertTrue((self.sp_A.todense() - D < 1e-7).all())
#self.assertTrue(((self.sp_A - C).todense() < 1e-7).all())
def test_hdf5(self):
fpath = 'test_matrix.h5'
store = io.hdf5(fpath)
# root writes its numpy-dense matrix...
if self.rank == 0:
store.write(self.np_A)
MPI.COMM_WORLD.barrier()
# ... all processes read back what root has written
B = store.read('numpy-dense')
C = store.read('elemental-dense')
D = store.read('elemental-dense', distribution='VC_STAR')
if self.rank == 0:
self.assertTrue((self.np_A - B < 1e-7).all())
self.compareMatrixNorm(C, self.np_A)
self.compareMatrixNorm(D, self.np_A)
        # ... all processes write their part of the (distributed) elemental-dense matrix
store.write(self.ele_A)
# ... and read back in various representations: local(copies) or distributed(parts)
B = store.read('numpy-dense')
C = store.read('elemental-dense')
D = store.read('elemental-dense', distribution='VC_STAR')
self.compareMatrixNorm(self.ele_A, B)
self.compareMatrixNorm(C, B)
self.compareMatrixNorm(D, B)
# all unsupported read formats should raise an exception
with self.assertRaises(io.SkylarkIOTypeError):
store.read('combblas-sparse')
store.read('scipy-sparse')
def test_txt(self):
fpath = 'test_matrix.txt'
store = io.txt(fpath)
store.write(self.ele_A)
B = store.read('numpy-dense')
self.compareMatrixNorm(self.ele_A, B)
# root writes its numpy-dense matrix...
if self.rank == 0:
store.write(self.np_A)
MPI.COMM_WORLD.barrier()
# ... all processes read back what root has written
B = store.read('numpy-dense')
if self.rank == 0:
self.assertTrue((self.np_A - B < 1e-7).all())
# all unsupported read formats should raise an exception
with self.assertRaises(io.SkylarkIOTypeError):
store.read('elemental-dense')
store.read('combblas-sparse')
store.read('scipy-sparse')
def test_libsvm(self):
fpath = base_dir + '/python-skylark/skylark/datasets/usps.t'
store = io.libsvm(fpath)
features_matrix, labels_matrix = store.read()
matrix_info = features_matrix.shape, features_matrix.nnz, labels_matrix.shape
#FIXME: currently there is no way to test this in a sophisticated way.
# For now just test matrix_info
self.assertEqual(matrix_info, ((2007, 256), 513792, (2007,)))
if __name__ == '__main__':
import sys
#XXX: hack to get argument to libsvm test
global base_dir
base_dir = sys.argv[1]
del(sys.argv[1])
unittest.main(verbosity=2)
|
# This file is Copyright (c) 2015-2018 Florent Kermarrec <[email protected]>
# License: BSD
from migen import *
from migen.genlib.io import CRG
from litex.boards.platforms import arty
from litex.build.generic_platform import Pins, IOStandard, Misc, Subsignal, Inverted
from litex.soc.cores.uart import UARTWishboneBridge
from litex.soc.integration.soc_core import SoCCore
from litex.soc.integration.builder import *
from litex.soc.interconnect.csr import *
from litescope import LiteScopeIO, LiteScopeAnalyzer
_serial2 = [
("serial2", 0,
Subsignal("rx", Pins("ck_io:ck_io8")),
Subsignal("tx", Pins("ck_io:ck_io9"), Misc("PULLUP")),
IOStandard("LVCMOS33")
),
]
# SoCController ------------------------------------------------------------------------------------
class Scratch(Module, AutoCSR):
def __init__(self):
self._scratch0 = CSRStorage(32, reset=0x12345678)
self._scratch1 = CSRStorage(32, reset=0x9abcdef0)
self._scratch2 = CSRStorage(32, reset=0xdeadbeef)
self._scratch3 = CSRStorage(32, reset=0x55aaaa55)
class LiteScopeSoC(SoCCore):
csr_map = {
"io": 16,
"analyzer": 17
}
csr_map.update(SoCCore.csr_map)
def __init__(self, platform):
sys_clk_freq = int((1e9/platform.default_clk_period))
SoCCore.__init__(self, platform, sys_clk_freq,
cpu_type=None,
csr_data_width=32,
with_uart=False,
ident="Litescope example design", ident_version=True,
with_timer=False
)
# crg
self.submodules.crg = CRG(platform.request(platform.default_clk_name))
# bridge
platform.add_extension(_serial2)
bridge = UARTWishboneBridge(platform.request("serial2"), sys_clk_freq, baudrate=115200)
self.submodules.bridge = bridge
self.add_wb_master(bridge.wishbone)
# Litescope IO
self.submodules.io = LiteScopeIO(8)
for i in range(8):
try:
self.comb += platform.request("user_led", i).eq(self.io.output[i])
except:
pass
# Litescope Analyzer
analyzer_groups = {}
# counter group
counter = Signal(16, name_override="counter")
zero = Signal(name_override="zero")
self.sync += counter.eq(counter + 1)
self.comb += zero.eq(counter == 0)
analyzer_groups[0] = [
zero,
counter
]
# communication group
analyzer_groups[1] = [
platform.lookup_request("serial2").tx,
platform.lookup_request("serial2").rx,
bridge.wishbone
]
# fsm group
fsm = FSM(reset_state="STATE1")
self.submodules += fsm
fsm.act("STATE1",
NextState("STATE2")
)
fsm.act("STATE2",
NextState("STATE1")
)
analyzer_groups[2] = [
fsm
]
# analyzer
self.submodules.analyzer = LiteScopeAnalyzer(analyzer_groups, 512)
# test regs
self.submodules.scratch = Scratch()
self.add_csr("scratch")
def do_exit(self, vns):
self.analyzer.export_csv(vns, "analyzer.csv")
def main():
platform = arty.Platform()
soc = LiteScopeSoC(platform)
builder = Builder(soc, csr_csv="csr.csv")
builder.build()
if __name__ == "__main__":
main()
|
''' Problem Description
Given a positive integer A, return its corresponding column title as appear in an Excel sheet.
Problem Constraints
1 <= A <= 1000000000
Input Format
First and only argument is integer A.
Output Format
Return a string, the answer to the problem.
Approach: base conversion'''
class Solution:
# @param A : integer
    # @return a string
def convertToTitle(self, A):
        # chr -> integer to character, ord -> character to integer
        # build a list n containing the characters 'A'..'Z'
        n = [chr(i) for i in range(ord("A"), ord("Z") + 1)]
        # pad index 0 so the letters are 1-indexed: n[1] == 'A', n[26] == 'Z'
        n.insert(0, 0)
        # a collects the base-26 digits, p the corresponding letters
        a = []
        p = []
        # base conversion, adjusted because column titles have no zero digit
while A>26:
k=A%26
A=A//26
if k==0:
a.append(26)
A-=1
else:
a.append(k)
a.append(int(A))
a.reverse()
for i in a:
p.append(n[i])
r="".join(p)
return r
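# Quick self-check added for illustration (not part of the original solution):
# 28 = 1*26 + 2 maps to "A" followed by "B", i.e. "AB", and 703 maps to "AAA".
if __name__ == "__main__":
    sol = Solution()
    for number, expected in [(1, "A"), (26, "Z"), (28, "AB"), (52, "AZ"), (703, "AAA")]:
        assert sol.convertToTitle(number) == expected, (number, expected)
    print("all examples match")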
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""COWC datasets."""
import abc
import csv
import os
from typing import Callable, Dict, List, Optional, cast
import matplotlib.pyplot as plt
import numpy as np
import torch
from PIL import Image
from torch import Tensor
from .geo import VisionDataset
from .utils import check_integrity, download_and_extract_archive
class COWC(VisionDataset, abc.ABC):
"""Abstract base class for the COWC dataset.
The `Cars Overhead With Context (COWC) <https://gdo152.llnl.gov/cowc/>`_ data set
is a large set of annotated cars from overhead. It is useful for training a device
such as a deep neural network to learn to detect and/or count cars.
The dataset has the following attributes:
1. Data from overhead at 15 cm per pixel resolution at ground (all data is EO).
2. Data from six distinct locations: Toronto, Canada; Selwyn, New Zealand;
Potsdam and Vaihingen, Germany; Columbus, Ohio and Utah, United States.
3. 32,716 unique annotated cars. 58,247 unique negative examples.
4. Intentional selection of hard negative examples.
5. Established baseline for detection and counting tasks.
6. Extra testing scenes for use after validation.
If you use this dataset in your research, please cite the following paper:
* https://doi.org/10.1007/978-3-319-46487-9_48
"""
@property
@abc.abstractmethod
def base_url(self) -> str:
"""Base URL to download dataset from."""
@property
@abc.abstractmethod
def filenames(self) -> List[str]:
"""List of files to download."""
@property
@abc.abstractmethod
def md5s(self) -> List[str]:
"""List of MD5 checksums of files to download."""
@property
@abc.abstractmethod
def filename(self) -> str:
"""Filename containing train/test split and target labels."""
def __init__(
self,
root: str = "data",
split: str = "train",
transforms: Optional[Callable[[Dict[str, Tensor]], Dict[str, Tensor]]] = None,
download: bool = False,
checksum: bool = False,
) -> None:
"""Initialize a new COWC dataset instance.
Args:
root: root directory where dataset can be found
split: one of "train" or "test"
transforms: a function/transform that takes input sample and its target as
entry and returns a transformed version
download: if True, download dataset and store it in the root directory
checksum: if True, check the MD5 of the downloaded files (may be slow)
Raises:
AssertionError: if ``split`` argument is invalid
RuntimeError: if ``download=False`` and data is not found, or checksums
don't match
"""
assert split in ["train", "test"]
self.root = root
self.split = split
self.transforms = transforms
self.checksum = checksum
if download:
self._download()
if not self._check_integrity():
raise RuntimeError(
"Dataset not found or corrupted. "
+ "You can use download=True to download it"
)
self.images = []
self.targets = []
with open(
os.path.join(self.root, self.filename.format(split)),
encoding="utf-8-sig",
newline="",
) as f:
reader = csv.reader(f, delimiter=" ")
for row in reader:
self.images.append(row[0])
self.targets.append(row[1])
def __getitem__(self, index: int) -> Dict[str, Tensor]:
"""Return an index within the dataset.
Args:
index: index to return
Returns:
data and label at that index
"""
sample = {"image": self._load_image(index), "label": self._load_target(index)}
if self.transforms is not None:
sample = self.transforms(sample)
return sample
def __len__(self) -> int:
"""Return the number of data points in the dataset.
Returns:
length of the dataset
"""
return len(self.targets)
def _load_image(self, index: int) -> Tensor:
"""Load a single image.
Args:
index: index to return
Returns:
the image
"""
filename = os.path.join(self.root, self.images[index])
with Image.open(filename) as img:
array: "np.typing.NDArray[np.int_]" = np.array(img)
tensor = torch.from_numpy(array)
# Convert from HxWxC to CxHxW
tensor = tensor.permute((2, 0, 1))
return tensor
def _load_target(self, index: int) -> Tensor:
"""Load a single target.
Args:
index: index to return
Returns:
the target
"""
target = int(self.targets[index])
tensor = torch.tensor(target)
return tensor
def _check_integrity(self) -> bool:
"""Check integrity of dataset.
Returns:
True if dataset files are found and/or MD5s match, else False
"""
for filename, md5 in zip(self.filenames, self.md5s):
filepath = os.path.join(self.root, filename)
if not check_integrity(filepath, md5 if self.checksum else None):
return False
return True
def _download(self) -> None:
"""Download the dataset and extract it."""
if self._check_integrity():
print("Files already downloaded and verified")
return
for filename, md5 in zip(self.filenames, self.md5s):
download_and_extract_archive(
self.base_url + filename,
self.root,
filename=filename,
md5=md5 if self.checksum else None,
)
def plot(
self,
sample: Dict[str, Tensor],
show_titles: bool = True,
suptitle: Optional[str] = None,
) -> plt.Figure:
"""Plot a sample from the dataset.
Args:
sample: a sample returned by :meth:`__getitem__`
show_titles: flag indicating whether to show titles above each panel
suptitle: optional string to use as a suptitle
Returns:
a matplotlib Figure with the rendered sample
.. versionadded:: 0.2
"""
image = sample["image"]
label = cast(str, sample["label"].item())
showing_predictions = "prediction" in sample
if showing_predictions:
prediction = cast(str, sample["prediction"].item())
else:
prediction = None
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
ax.imshow(image.permute(1, 2, 0))
ax.axis("off")
if show_titles:
title = f"Label: {label}"
if prediction is not None:
title += f"\nPrediction: {prediction}"
ax.set_title(title)
if suptitle is not None:
plt.suptitle(suptitle)
return fig
class COWCCounting(COWC):
"""COWC Dataset for car counting."""
base_url = (
"https://gdo152.llnl.gov/cowc/download/cowc/datasets/patch_sets/counting/"
)
filenames = [
"COWC_train_list_64_class.txt.bz2",
"COWC_test_list_64_class.txt.bz2",
"COWC_Counting_Toronto_ISPRS.tbz",
"COWC_Counting_Selwyn_LINZ.tbz",
"COWC_Counting_Potsdam_ISPRS.tbz",
"COWC_Counting_Vaihingen_ISPRS.tbz",
"COWC_Counting_Columbus_CSUAV_AFRL.tbz",
"COWC_Counting_Utah_AGRC.tbz",
]
md5s = [
"187543d20fa6d591b8da51136e8ef8fb",
"930cfd6e160a7b36db03146282178807",
"bc2613196dfa93e66d324ae43e7c1fdb",
"ea842ae055f5c74d0d933d2194764545",
"19a77ab9932b722ef52b197d70e68ce7",
"4009c1e420566390746f5b4db02afdb9",
"daf8033c4e8ceebbf2c3cac3fabb8b10",
"777ec107ed2a3d54597a739ce74f95ad",
]
filename = "COWC_{}_list_64_class.txt"
class COWCDetection(COWC):
"""COWC Dataset for car detection."""
base_url = (
"https://gdo152.llnl.gov/cowc/download/cowc/datasets/patch_sets/detection/"
)
filenames = [
"COWC_train_list_detection.txt.bz2",
"COWC_test_list_detection.txt.bz2",
"COWC_Detection_Toronto_ISPRS.tbz",
"COWC_Detection_Selwyn_LINZ.tbz",
"COWC_Detection_Potsdam_ISPRS.tbz",
"COWC_Detection_Vaihingen_ISPRS.tbz",
"COWC_Detection_Columbus_CSUAV_AFRL.tbz",
"COWC_Detection_Utah_AGRC.tbz",
]
md5s = [
"c954a5a3dac08c220b10cfbeec83893c",
"c6c2d0a78f12a2ad88b286b724a57c1a",
"11af24f43b198b0f13c8e94814008a48",
"22fd37a86961010f5d519a7da0e1fc72",
"bf053545cc1915d8b6597415b746fe48",
"23945d5b22455450a938382ccc2a8b27",
"f40522dc97bea41b10117d4a5b946a6f",
"195da7c9443a939a468c9f232fd86ee3",
]
filename = "COWC_{}_list_detection.txt"
# TODO: add COWC-M datasets:
#
# * https://gdo152.llnl.gov/cowc/download/cowc-m/datasets/
# * https://github.com/LLNL/cowc
#
# Same as COWC datasets, but instead of binary classification there are 4 car classes:
#
# 1. Sedan
# 2. Pickup
# 3. Other
# 4. Unknown
#
# May need new abstract base class. Will need subclasses for different patch sizes.
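# Hedged usage sketch (not part of the original module): instantiating the
# counting variant and reading one sample. The "data" root directory and the
# decision not to download are assumptions for illustration only.
if __name__ == "__main__":
    try:
        ds = COWCCounting(root="data", split="train", download=False)
        sample = ds[0]
        print(f"{len(ds)} patches, first car count: {sample['label'].item()}")
    except RuntimeError as err:
        # raised when the archives are not present under the chosen root
        print(err)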
|
import logging
from unittest import TestCase
from robot_math.types import Percent
from robot_math.types.data_packet_type import DataPacket
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(module)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.DEBUG)
eq = {
'8k': '1K',
'1000K': '1M',
'8g': '1G',
'8000k': '1M',
}
summ_p = {
'2K': ('8k', '1K'),
'2M': ('1000K', '1M'),
'2G': ('8g', '1G'),
'2.1M': ('8000k', '1.1M')
}
summ_n = {
'2.1M': ('800k', '1M')
}
ne = {
'8k': '1.1K',
'1002K': '1M',
'8.1g': '1G',
'8020k': '1M',
}
class TestBitrate(TestCase):
@classmethod
def setUpClass(cls):
logging.info(f'{cls.__name__}: Start')
@classmethod
def tearDownClass(cls):
logging.info(f'{cls.__name__}: End')
def setUp(self):
logging.info(f'Test {self._testMethodName} start')
def tearDown(self):
logging.info(f'Test {self._testMethodName} end')
def test_bit_value(self):
b0 = DataPacket(0)
assert b0 == 0, "Wrong output: {}".format(b0)
logging.info("Type: {}, Value: {}".format(type(b0).__name__, b0))
b00 = DataPacket(float(0))
assert str(b00) == '0.0b', "Wrong output: {}".format(b00)
logging.info("Type: {}, Value: {}".format(type(b0).__name__, b0))
b1 = DataPacket('1K')
assert str(b1) == '1.0K', "Wrong output: {}".format(b1)
logging.info("Type: {}, Value: {}".format(type(b1).__name__, b1))
b2 = DataPacket('1M')
assert str(b2) == '1.0M', "Wrong output: {}".format(b2)
logging.info("Type: {}, Value: {}".format(type(b2).__name__, b2))
b3 = DataPacket(number=1, rate='K')
assert str(b3) == '1.0K', "Wrong output: {}".format(b3)
logging.info("Type: {}, Value: {}".format(type(b3).__name__, b3))
b4 = DataPacket('1G')
assert str(b4) == '1.0G', "Wrong output: {}".format(b4)
logging.info("Type: {}, Value: {}".format(type(b4).__name__, b4))
# b4 = PacketSize('1.1G')
# assert str(b4) == '1.1G', "Wrong output: {}".format(b4)
# logging.info("Type: {}, Value: {}".format(type(b4), b4))
b5 = DataPacket('1.1446564G')
logging.info("Format: {0} vs. {0:.1M}".format(b5))
# logging.info("Format: {:.2f}".format(b5))
def test_eq(self):
for _b1, _b2 in eq.items():
b1, b2 = DataPacket(_b1), DataPacket(_b2)
assert b1 == b2, "Wrong output: {} == {}".format(b1, b2)
logging.info("{} == {}".format(b1, b2))
def test_ne(self):
for _b1, _b2 in ne.items():
b1, b2 = DataPacket(_b1), DataPacket(_b2)
assert b1 != b2, "Wrong output: {} != {}".format(b1.bit_value, b2.bit_value)
logging.info("{} != {}".format(b1, b2))
def test_iadd(self):
p = DataPacket('1M')
p_add = DataPacket('1K')
logging.info(f"1M - {p:.1m}")
p += p_add
logging.info(f"8m - {p:.4m}")
p += '1M'
logging.info(f"12m - {p:.4m}")
def test_isub(self):
p = DataPacket('1M')
p_sub = DataPacket('1K')
logging.info(f"{p:.1m}")
p -= p_sub
logging.info(f"{p:.4m}")
p -= '0.1M'
logging.info(f"{p:.4m}")
def test_sum_positive(self):
errors = []
for _sum, (_b1, _b2) in summ_p.items():
try:
s, b1, b2 = DataPacket(_sum), DataPacket(_b1), DataPacket(_b2)
_b = [b1, b2]
r_s = sum(_b)
assert r_s == s, "Wrong output: {} + {} == {} (Actual: {})".format(b1, b2, s, r_s)
logging.info("{} + {} == {}".format(b1, b2, s))
except AssertionError as e:
errors.append(e)
assert len(errors) == 0, "Following iterations failed:\n{}".format(
'\n\t'.join([str(e) for e in errors])
)
def test_sum_negative(self):
for _sum, (_b1, _b2) in summ_n.items():
s, b1, b2 = DataPacket(_sum), DataPacket(_b1), DataPacket(_b2)
r_s = sum([b1, b2])
assert r_s != s, "Wrong output: {} + {} == {} (Actual: {})".format(b1, b2, s, r_s)
logging.info("{} + {} != {} (Actual: {})".format(b1, b2, s, r_s))
def test_percent(self):
packet = DataPacket('10M')
percent = Percent('10%')
packet += percent
logging.info(f"{packet}")
def test_format_conversion(self):
v = 8000000.8
p = DataPacket(number=v)
logging.info(f"{p}")
logging.info(f"{p:.1b}")
logging.info(f"{p:.1k}")
logging.info(f"{p:.2m}")
logging.info(f"{p:.1B}")
logging.info(f"{p:.1K}")
logging.info(f"{p:.1M}")
|
from setuptools import setup
version = '2.5.0'
setup(
name='cbagent',
version=version,
description='Stats collectors package for Couchbase Server monitoring',
author='Couchbase',
license='Apache Software License',
packages=[
'cbagent',
'cbagent.collectors',
'cbagent.collectors.libstats'
],
entry_points={
'console_scripts': [
'cbagent = cbagent.__main__:main',
]
},
include_package_data=True,
install_requires=[
'couchbase==1.2.1',
'decorator',
'fabric==1.8.0',
'logger',
'requests==2.1.0',
'seriesly',
'spring'
],
dependency_links=[
'git+https://github.com/couchbaselabs/spring.git#egg=spring',
]
)
|
"""UseCase for showing metrics."""
import logging
from argparse import Namespace, ArgumentParser
from typing import Final, cast
import jupiter.command.command as command
from jupiter.domain.adate import ADate
from jupiter.domain.metrics.metric_key import MetricKey
from jupiter.use_cases.metrics.find import MetricFindUseCase
from jupiter.utils.global_properties import GlobalProperties
LOGGER = logging.getLogger(__name__)
class MetricShow(command.Command):
"""UseCase for showing metrics."""
_global_properties: Final[GlobalProperties]
_command: Final[MetricFindUseCase]
def __init__(self, global_properties: GlobalProperties, the_command: MetricFindUseCase) -> None:
"""Constructor."""
self._global_properties = global_properties
self._command = the_command
@staticmethod
def name() -> str:
"""The name of the command."""
return "metric-show"
@staticmethod
def description() -> str:
"""The description of the command."""
return "Show the metrics"
def build_parser(self, parser: ArgumentParser) -> None:
"""Construct a argparse parser for the command."""
parser.add_argument("--metric", dest="metric_keys", required=False, default=[], action="append",
help="The key of the metric")
def run(self, args: Namespace) -> None:
"""Callback to execute when the command is invoked."""
metric_keys = [MetricKey.from_raw(mk) for mk in args.metric_keys] \
if len(args.metric_keys) > 0 else None
response = self._command.execute(MetricFindUseCase.Args(allow_archived=False, filter_keys=metric_keys))
for metric_response_entry in response.metrics:
metric = metric_response_entry.metric
collection_project = metric_response_entry.collection_project
metric_entries = metric_response_entry.metric_entries
print(f"{metric.key}: {metric.name}" +
(f" @{metric.collection_params.period.for_notion()} in " +
(f"{collection_project.name if collection_project else 'Default'}") +
(f" eisen={','.join(e.for_notion() for e in metric.collection_params.eisen)}"
if metric.collection_params.eisen else '')) +
(f" difficulty={metric.collection_params.difficulty.for_notion()}"
if metric.collection_params.difficulty else '') +
(f" actionable-from-day={metric.collection_params.actionable_from_day}"
if metric.collection_params.actionable_from_day else '') +
(f" actionable-from-month={metric.collection_params.actionable_from_month}"
if metric.collection_params.actionable_from_month else '') +
(f" due-at-time={metric.collection_params.due_at_time}"
if metric.collection_params.due_at_time else '') +
(f" due-at-day={metric.collection_params.due_at_day}"
if metric.collection_params.due_at_day else '') +
(f" due-at-month={metric.collection_params.due_at_month}"
if metric.collection_params.due_at_month else '')
if metric.collection_params else '' +
(f' #{metric.metric_unit.for_notion()}' if metric.metric_unit else ''))
for metric_entry in sorted(metric_entries, key=lambda me: me.collection_time):
print(f" - id={metric_entry.ref_id}" +
(f" {ADate.to_user_str(self._global_properties.timezone, metric_entry.collection_time)}") +
f" val={metric_entry.value}")
if metric_response_entry.metric_collection_inbox_tasks:
print(f" Collection Tasks:")
for inbox_task in sorted(
metric_response_entry.metric_collection_inbox_tasks,
key=lambda it: cast(ADate, it.due_date)):
print(f" -id={inbox_task.ref_id} {inbox_task.name} {inbox_task.status.for_notion()}")
|
# flake8: noqa: W403
from .base import *
DEBUG = True
|
#!/usr/bin/env python
import os
import sys
here = sys.path[0]
sys.path.insert(0, os.path.join(here, '..', '..', '..')) # root/
sys.path.insert(0, os.path.join(here, '..')) # openLbr/
sys.path.insert(0, os.path.join(here, '..', '..','eventBus','PyDispatcher-2.0.3')) # PyDispatcher-2.0.3/
import logging
import logging.handlers
import json
import pytest
import openLbr
import openvisualizer.openvisualizer_utils as u
#============================ logging =========================================
LOGFILE_NAME = 'test_utils.log'
log = logging.getLogger('test_utils')
log.setLevel(logging.ERROR)
log.addHandler(logging.NullHandler())
logHandler = logging.handlers.RotatingFileHandler(LOGFILE_NAME,
backupCount=5,
mode='w')
logHandler.setFormatter(logging.Formatter("%(asctime)s [%(name)s:%(levelname)s] %(message)s"))
for loggerName in ['test_utils',
'openLbr',]:
temp = logging.getLogger(loggerName)
temp.setLevel(logging.DEBUG)
temp.addHandler(logHandler)
#============================ defines =========================================
#============================ fixtures ========================================
#===== expectedBuf2int
EXPECTEDBUF2INT = [
# buf int
json.dumps(([0x01,0x02], 0x0102)),
json.dumps(([0xaa,0xbb], 0xaabb)),
]
@pytest.fixture(params=EXPECTEDBUF2INT)
def expectedBuf2int(request):
return request.param
#===== expectedhex2buf
EXPECTEDHEX2BUF = [
# hex buf
json.dumps(('abcd', [0xab,0xcd])),
json.dumps(('', [])),
json.dumps(('aa', [0xaa])),
]
@pytest.fixture(params=EXPECTEDHEX2BUF)
def expectedhex2buf(request):
return request.param
#===== expectedbyteinverse
EXPECTEDBYTEINVERSE = [
# b b_inverse
json.dumps((0x01,0x80)),
json.dumps((0x02,0x40)),
json.dumps((0x04,0x20)),
json.dumps((0x81,0x81)),
]
@pytest.fixture(params=EXPECTEDBYTEINVERSE)
def expectedbyteinverse(request):
return request.param
#===== expectedformatipv6
EXPECTEDFORMATIPv6 = [
json.dumps(
(
[ # list
0x01,0x23,0x45,0x67,0x89,0xab,0xcd,0xef,
0xfe,0xdc,0xba,0x98,0x76,0x54,0x32,0x10
],
'123:4567:89ab:cdef:fedc:ba98:7654:3210' # string
)
),
json.dumps(
(
[ # list
0x01,0x23,0x45,0x67,0x00,0x00,0xcd,0xef,
0xfe,0xdc,0xba,0x98,0x76,0x54,0x32,0x10
],
'123:4567:0:cdef:fedc:ba98:7654:3210' # string
)
),
json.dumps(
(
[ # list
0x01,0x23,0x45,0x67,0x00,0x00,0x00,0x00,
0xfe,0xdc,0xba,0x98,0x76,0x54,0x32,0x10
],
'123:4567:0:0:fedc:ba98:7654:3210' # string
)
),
]
@pytest.fixture(params=EXPECTEDFORMATIPv6)
def expectedformatipv6(request):
return request.param
#============================ helpers =========================================
#============================ tests ===========================================
def test_buf2int(expectedBuf2int):
(expBuf,expInt) = json.loads(expectedBuf2int)
assert u.buf2int(expBuf)==expInt
def test_hex2buf(expectedhex2buf):
(expHex,expBuf) = json.loads(expectedhex2buf)
expHex = str(expHex)
assert u.hex2buf(expHex)==expBuf
def test_byteinverse(expectedbyteinverse):
(b,b_inverse) = json.loads(expectedbyteinverse)
assert u.byteinverse(b)==b_inverse
assert u.byteinverse(b_inverse)==b
def test_formatIPv6Addr(expectedformatipv6):
(ipv6_list,ipv6_string) = json.loads(expectedformatipv6)
    print(ipv6_string)
assert u.formatIPv6Addr(ipv6_list)==ipv6_string |
"""
Cookie Module
Module for cookie management in webdriver.
Created by Artur Spirin
https://github.com/ArturSpirin/YouTube-WebDriver-Tutorials/blob/master/Cookies.py
"""
import pickle
import pprint
import os
PATH = os.getcwd()
def get_file(filename):
    return os.path.join(PATH, 'cookies', filename + '.txt')  # platform-independent path
def save_cookies(driver, filename):
pickle.dump(driver.get_cookies(), open(get_file(filename), "wb"))
def load_cookies(driver, filename, url=None):
cookies = pickle.load(open(get_file(filename), "rb"))
driver.delete_all_cookies()
# have to be on a page before you can add any cookies, any page - does not matter which
driver.get("https://google.com" if url is None else url)
for cookie in cookies:
        if isinstance(cookie.get('expiry'), float):  # some drivers return 'expiry' as a float
            cookie['expiry'] = int(cookie['expiry'])  # convert it to an int so add_cookie accepts it
driver.add_cookie(cookie)
def delete_cookies(driver, domains=None):
if domains is not None:
cookies = driver.get_cookies()
original_len = len(cookies)
        # keep only cookies whose domain is not in the given domains
        # (removing from the list while iterating over it would skip entries)
        cookies = [cookie for cookie in cookies if str(cookie["domain"]) not in domains]
if len(cookies) < original_len: # if cookies changed, we will update them
# deleting everything and adding the modified cookie object
driver.delete_all_cookies()
for cookie in cookies:
driver.add_cookie(cookie)
else:
driver.delete_all_cookies() |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
import numpy as np
from maze.msg import Maze
import astar
from scipy.spatial.transform import Rotation as R
import sys
from fixed_path_controller import Controller
class DynamicController(Controller):
"""
    This controller is able to handle a changing maze,
    with the assumption that the total size of the maze does not change.
"""
def update(self, maze):
"""
Input:
- maze: the message received from vision
+ transformation
+ grid
+ map_shape
+ turtlebot_pos: in segmentation pixel
+ goal_pos: in segmentation pixel
+ image_shape: segmentation image
Do
- Update instance attributes
+ grid, path: only once
+ state: every message
"""
# only update the state when the message is valid
if self.validate(maze, 100):
# start = self.localize(maze.turtlebot_pos, maze.map_shape, maze.seg_img_shape)
start = self.localize_turtlebot(maze.turtlebot_pos, maze.map_shape, maze.seg_img_shape)
# else:
# print("Invalid maze, state update is estimated")
# self.estimate_state()
# return
self.state_pixel = np.asarray(maze.turtlebot_pos)
# if maze is meaningful, update every attribute
if maze.success:
# update the state
x, y = np.asarray(start)
r = maze.transform.rotation
rotation = R.from_quat([r.x,r.y,r.z,r.w]).as_euler('xyz', degrees=False)
angle = rotation[2]
self.state = (x, y, angle)
self.goal_pixel = np.asarray(maze.goal_pos)
goal = self.localize(maze.goal_pos, maze.map_shape, maze.seg_img_shape)
self.grid = np.asarray(maze.occupancy_grid).reshape(maze.map_shape[0], maze.map_shape[1])
planner = astar.Planner(self.grid, start, goal)
self.path = np.asarray(planner.findPath())
if len(self.path) >= 2:
self.end = self.path[1]
self.scale = np.asarray(maze.seg_img_shape) / maze.map_shape
self.dist_tolerance = np.min(self.scale) / self.dist_factor
if tuple(start) in self.path:
self.is_turning = True
else:
self.is_turning = False
# print('========================================')
# print('End: ', self.end)
# print('State: ', self.state)
# print('State pixel: ', self.state_pixel)
# print('End pixel: ', np.flip((self.end + [0.5, 0.5])* self.scale).astype(int))
# print('Goal pixel: ', self.goal_pixel)
# print('Grid: ', self.grid)
# print('Path: ', self.path)
# print('current motion: from ', self.state, ' to ', self.end)
# print('Scale: ', self.scale)
def localize_turtlebot(self, pos, map_shape, img_shape):
"""
        Localize the position of the turtlebot.
        Only use the grid position if the distance from the center of the turtlebot to the center of the grid cell
        is within the distance tolerance.
"""
temp_pos = (np.flip(pos).astype(float) / img_shape) * map_shape
if self.end is None or self.distance_to_end(pos) < self.dist_tolerance:
return temp_pos.astype(int)
else:
return np.array([self.state[0], self.state[1]])
def distance_to_end(self, pos):
"""
compute the distance from the pos to self.end
"""
end_pixel = np.flip((self.end + [0.5, 0.5])* self.scale).astype(int)
return np.linalg.norm(end_pixel - pos)
def control(self):
"""
The main control function
"""
# self.message_rate = 10
self.Kz /= 2
self.Kl /= 2
# self.angle_tolerance = 0.1
self.dist_factor = 4
self.goal_tolerance = 70
rospy.init_node("controller", anonymous=True)
r = rospy.Rate(self.message_rate)
# self.linear_factor = 0.01
while not rospy.is_shutdown() and not self.is_goal(self.goal_tolerance):
rospy.Subscriber('/maze/grid', Maze, self.update)
desired = self.compute_desired_angle()
self.linear_factor = 0.01
if desired is not None:
current_path = self.path
# desired angle is not changed until turtlebot reaches the next grid position and path changes
while not rospy.is_shutdown() and not self.is_goal(self.goal_tolerance) and np.array_equal(current_path, self.path):
angle_diff = desired - self.state[2]
dist_diff = self.distance()
# print('Path length', path_length)
print("path: ", self.path)
# keep the angle_diff in the range of -pi to pi
if angle_diff > np.pi:
angle_diff -= 2 * np.pi
if angle_diff < -np.pi:
angle_diff += 2 * np.pi
print("Angle difference: ", angle_diff)
print('current motion: from ', self.state, ' to ', self.end)
cmd = Twist()
# pure rotation at turning points
# if abs(angle_diff) > self.angle_tolerance and hasattr(self, 'is_turning') and self.is_turning:
if abs(angle_diff) > self.angle_tolerance:
self.linear = 0.0
cmd.angular.z = self.angular = self.Kz * angle_diff
# if hasattr(self, 'is_turning') and self.is_turning:
# self.linear_factor = 0.01
# rotation and translation
elif dist_diff > self.dist_tolerance:
cmd.linear.x = self.linear = self.Kl * dist_diff * self.linear_factor
cmd.angular.z = angle_diff
self.angular = 0.0
if self.linear_factor < 1:
self.linear_factor += 0.01
# elif len(self.path) < path_length:
# print("should start turning now")
# break
self.pub.publish(cmd)
# print(cmd)
r.sleep()
# keep moving turtlebot using the previous twist for certain times
if self.is_goal(self.goal_tolerance):
for _ in range(60):
self.pub.publish(cmd)
r.sleep()
print('Goal is reached!')
break
r.sleep()
if __name__ == '__main__':
controller = DynamicController(sys.argv[1])
controller.control() |
import logging
import os
import unittest
from src.dasicon_api import DaisyconApi
class TestDaisyconApi(unittest.TestCase):
def setUp(self):
# set up logging
logging.root.handlers = []
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s', level=logging.INFO)
        logging.info('Setting up DaisyconApi')
username = os.environ.get('DASIYCON_USERNAME')
password = os.environ.get('DASIYCON_PASSWORD')
publisher_id = os.environ.get('DASIYCON_PUBLISHER_ID')
self.api = DaisyconApi(username, password, publisher_id)
def test_get_publisher_programs(self):
pages_data, value_count = self.api.get_publisher_programs(page=1, per_page=1)
self.assertGreater(len(pages_data), 0)
self.assertGreater(value_count, 1)
|
#!/usr/bin/env python3
# pip3 install pymupdf
import fitz
import sys
import re
import os
def pdf2pic(pdf_path, output_path):
    # regular expressions used to detect XObject / image entries
    checkXO = r"/Type(?= */XObject)"
    checkIM = r"/Subtype(?= */Image)"
    # open the PDF
    doc = fitz.open(pdf_path)
    # image counter
    imgcount = 0
    lenXREF = doc._getXrefLength()
    # print basic information about the PDF
    print("File: {}, pages: {}, objects: {}".format(pdf_path, len(doc), lenXREF - 1))
    # iterate over every xref object
    for i in range(1, lenXREF):
        # get the object's source string
        text = doc._getXrefString(i)
        isXObject = re.search(checkXO, text)
        # check whether the object is an image
        isImage = re.search(checkIM, text)
        # skip anything that is not an XObject or not an image
        if not isXObject:
            continue
        if not isImage:
            continue
        imgcount += 1
        # build a pixmap from the xref index
        pix = fitz.Pixmap(doc, i)
        # name the output image after its extraction order
        new_name = "%02d.png" % imgcount
        # if pix.n < 5 the pixmap is grayscale/RGB and can be saved as PNG directly
        if pix.n < 5:
            pix.writePNG(os.path.join(output_path, new_name))
        # otherwise convert CMYK to RGB first
        else:
            pix0 = fitz.Pixmap(fitz.csRGB, pix)
            pix0.writePNG(os.path.join(output_path, new_name))
            pix0 = None
        # release resources
        pix = None
    print("Extracted {} images".format(imgcount))
def pdf2txt(pdf_path, output_path):
    '''
    Extract the text from a PDF.
    '''
    doc = fitz.open(pdf_path)
    output_file = output_path + os.sep + 'output.txt'
    print(output_file)
    out = open(output_file, 'w')
    for d in doc:
        out.write(d.getText("text"))  # "html" / "xml" / "json" etc. are also available
    out.close()
if __name__ == '__main__':
    # the PDF path is the first command line argument
    if len(sys.argv) == 1:
        exit(-1)
    pdf_path = sys.argv[1]
    output_path = os.getcwd() + os.sep + os.path.basename(pdf_path)[:-4]
    # create the output directory if it does not exist
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    pdf2txt(pdf_path, output_path)
    pdf2pic(pdf_path, output_path) |
"""
Sequence distance metrics (:mod:`skbio.sequence.distance`)
==========================================================
.. currentmodule:: skbio.sequence.distance
This module contains functions for computing distances between scikit-bio
``Sequence`` objects. These functions can be used directly or supplied to other
parts of the scikit-bio API that accept a sequence distance metric as input,
such as :meth:`skbio.sequence.Sequence.distance` and
:meth:`skbio.stats.distance.DistanceMatrix.from_iterable`.
Functions
---------
.. autosummary::
:toctree:
hamming
kmer_distance
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
import scipy.spatial.distance
import skbio
from skbio.util._decorator import experimental
@experimental(as_of='0.4.2')
def hamming(seq1, seq2):
"""Compute Hamming distance between two sequences.
The Hamming distance between two equal-length sequences is the proportion
of differing characters.
Parameters
----------
seq1, seq2 : Sequence
Sequences to compute Hamming distance between.
Returns
-------
float
Hamming distance between `seq1` and `seq2`.
Raises
------
TypeError
If `seq1` and `seq2` are not ``Sequence`` instances.
TypeError
If `seq1` and `seq2` are not the same type.
ValueError
If `seq1` and `seq2` are not the same length.
See Also
--------
scipy.spatial.distance.hamming
Notes
-----
``np.nan`` will be returned if the sequences do not contain any characters.
This function does not make assumptions about the sequence alphabet in use.
Each sequence object's underlying sequence of characters are used to
compute Hamming distance. Characters that may be considered equivalent in
certain contexts (e.g., `-` and `.` as gap characters) are treated as
distinct characters when computing Hamming distance.
Examples
--------
>>> from skbio import Sequence
>>> from skbio.sequence.distance import hamming
>>> seq1 = Sequence('AGGGTA')
>>> seq2 = Sequence('CGTTTA')
>>> hamming(seq1, seq2)
0.5
"""
_check_seqs(seq1, seq2)
# Hamming requires equal length sequences. We are checking this here
# because the error you would get otherwise is cryptic.
if len(seq1) != len(seq2):
raise ValueError(
"Hamming distance can only be computed between sequences of equal "
"length (%d != %d)" % (len(seq1), len(seq2)))
# scipy throws a RuntimeWarning when computing Hamming distance on length 0
# input.
if not seq1:
distance = np.nan
else:
distance = scipy.spatial.distance.hamming(seq1.values, seq2.values)
return float(distance)
@experimental(as_of='0.5.0')
def kmer_distance(seq1, seq2, k, overlap=True):
"""Compute the kmer distance between a pair of sequences
The kmer distance between two sequences is the fraction of kmers that are
unique to either sequence.
Parameters
----------
seq1, seq2 : Sequence
Sequences to compute kmer distance between.
k : int
The kmer length.
overlap : bool, optional
Defines whether the kmers should be overlapping or not.
Returns
-------
float
kmer distance between `seq1` and `seq2`.
Raises
------
ValueError
If `k` is less than 1.
TypeError
If `seq1` and `seq2` are not ``Sequence`` instances.
TypeError
If `seq1` and `seq2` are not the same type.
Notes
-----
kmer counts are not incorporated in this distance metric.
``np.nan`` will be returned if there are no kmers defined for the
sequences.
Examples
--------
    >>> from skbio import Sequence
    >>> from skbio.sequence.distance import kmer_distance
>>> seq1 = Sequence('ATCGGCGAT')
>>> seq2 = Sequence('GCAGATGTG')
>>> kmer_distance(seq1, seq2, 3) # doctest: +ELLIPSIS
0.9230769230...
"""
_check_seqs(seq1, seq2)
seq1_kmers = set(map(str, seq1.iter_kmers(k, overlap=overlap)))
seq2_kmers = set(map(str, seq2.iter_kmers(k, overlap=overlap)))
all_kmers = seq1_kmers | seq2_kmers
if not all_kmers:
return np.nan
shared_kmers = seq1_kmers & seq2_kmers
number_unique = len(all_kmers) - len(shared_kmers)
fraction_unique = number_unique / len(all_kmers)
return fraction_unique
def _check_seqs(seq1, seq2):
# Asserts both sequences are skbio.sequence objects
for seq in seq1, seq2:
if not isinstance(seq, skbio.Sequence):
raise TypeError(
"`seq1` and `seq2` must be Sequence instances, not %r"
% type(seq).__name__)
# Asserts sequences have the same type
if type(seq1) is not type(seq2):
raise TypeError(
"Sequences must have matching type. Type %r does not match type %r"
% (type(seq1).__name__, type(seq2).__name__))
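# Hedged usage sketch (not part of the original module): how these metrics plug
# into DistanceMatrix.from_iterable, as mentioned in the module docstring. The
# example sequences are made up for illustration.
if __name__ == "__main__":
    from skbio import Sequence
    from skbio.stats.distance import DistanceMatrix
    seqs = [Sequence('ACGT'), Sequence('ACGA'), Sequence('TCGT')]
    # hamming requires equal-length sequences; kmer_distance does not
    dm = DistanceMatrix.from_iterable(seqs, metric=hamming)
    print(dm)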
|
# Generated by Django 2.2.13 on 2020-08-26 17:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pipeline', '0061_regionaldistrict_oc_m_yr'),
]
operations = [
migrations.RemoveField(
model_name='censussubdivision',
name='eng_fr_not_known',
),
migrations.AddField(
model_name='censussubdivision',
name='fr_known',
field=models.IntegerField(null=True),
),
]
|
import argparse
import logging as log
import os
import time
import shutil
import sys
import datetime
import numpy as np
from math import ceil, floor
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.optim as optim
from torch.multiprocessing import Process
from torch.autograd import Variable
from tensorboardX import SummaryWriter
import models as models
from dataloading.dataloaders import get_loader
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser()
## Parameters for NVVL loader (filepaths, augmentation settings)
parser.add_argument('--root', type=str, default='/root/3DCNN/',
help='input data root folder')
parser.add_argument('--output', type=str, default='',
help='output data root folder')
parser.add_argument('--label_json', type=str, default='labels_2hourlaserbinary.json',
help='JSON label filename')
parser.add_argument('--frames', type=int, default = 16,
help='num frames in input sequence (default: 16)')
parser.add_argument('--is_cropped', action='store_true',
help='crop input frames?')
parser.add_argument('--crop_size', type=int, nargs='+', default=[112, 112],
help='[height, width] for input crop (default: [112, 112])')
parser.add_argument('--shuffle', action="store_true",
help='Shuffle batches?')
parser.add_argument('--normalized', action="store_true",
help='Normalize images from [0;255] to [0;1]?')
parser.add_argument('--random_flip', action="store_true",
help='flip the image horizontally before cropping?')
parser.add_argument('--color_space', type = str, default = "RGB",
help='Color space to use. "RGB" and "YCbCr" are available. (default: "RGB")')
parser.add_argument('--dimension_order', type = str, default = "cfhw",
help='Axis order of the channels, frames, height and width. (default: "cfhw")')
parser.add_argument('--stride', type = int, default = None,
help='Frame stride when sampling from videos. (default: None)')
parser.add_argument('--test', action='store_true',
help='Whether to test a network, and not train it')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use. If set, only 1 GPU is used')
## Hyperparameters
parser.add_argument('--batchsize', type=int, default=10,
help='Training batch size (default: 10)')
parser.add_argument('--val_batchsize', type=int, default=4,
help='validation/test batch size (default: 4)')
parser.add_argument('--lr', default=0.1, type=float,
metavar='LR', help='initial learning rate (default 0.1)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum (default: 0.9)')
parser.add_argument('--nesterov', action="store_true",
help='use Nesterov Accelerated Gradient')
parser.add_argument('--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--step_size', default=5, type=int,
                    help='Step size for lr scheduler (default: 5)')
parser.add_argument('--gamma', default=0.1, type=float,
                    help='Gamma for lr scheduler (default: 0.1)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run. (default: 90)')
## System settings
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
## Network parametes
parser.add_argument('--arch', metavar='ARCH', default='c3d',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: c3d)')
parser.add_argument("--num_classes", default=2, type=int,
help="Number of neurons in output layer (if 1, sigmoid is last activation, otherwise softmax)")
parser.add_argument("--FCN", action="store_true",
help="Whether to use a dense validation/test approach. If not set center crop approach will be used")
def main(args):
systemInfo()
dirs = os.listdir(args.root)
if args.test:
assert "tst" in dirs, "A 'tst' directory is not in {}".format(args.root)
else:
assert "train" in dirs, "A 'train' directory is not in {}".format(args.root)
assert "val" in dirs, "A 'val' directory is not in {}".format(args.root)
assert "labels" in dirs, "A 'labels' directory is not in {}".format(args.root)
del dirs
if args.is_cropped:
assert args.crop_size[0] == args.crop_size[1], "Crop size is assumed to be square, but you supplied {}".format(args.crop_size)
args.sample_size = args.crop_size[0]
args.sample_duration = args.frames
if args.output == "":
now = datetime.datetime.now()
args.output = os.path.join("./results", now.strftime("%Y-%m-%d_%H:%M:%S"))
del now
if not os.path.exists(args.output):
os.mkdir(args.output)
os.mkdir(os.path.join(args.output, "weights"))
print("Output path: {}".format(args.output))
with open(os.path.join(args.output, "Settings.txt"), "w") as outfile:
outfile.write(str(vars(args)))
print("Setting up Tensorboard")
writer = SummaryWriter()
writer.add_text('config', str(vars(args)))
print("Tensorboard set up")
print("Setting Pytorch cuda settings")
torch.cuda.set_device(0)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.benchmark = True
print("Set Pytorch cuda settings\n")
print("Creating model '{}'".format(args.arch))
model = load_model(args)
print("Model created\n")
if args.gpu is not None:
print("Using GPU {}\n".format(args.gpu))
model = model.cuda(args.gpu)
elif torch.cuda.device_count() == 1:
print("Using a single GPU\n")
model = model.cuda()
else:
print("Using {} GPUs\n".format(torch.cuda.device_count()))
model = nn.DataParallel(model).cuda()
print("Setting up loss and optimizer")
if args.num_classes == 1:
criterion = nn.BCELoss().cuda(args.gpu)
else:
criterion = nn.NLLLoss().cuda(args.gpu)
optimizer = optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
nesterov = args.nesterov,
weight_decay=args.weight_decay)
scheduler = optim.lr_scheduler.StepLR(optimizer, args.step_size, args.gamma)
print("Optimizer and loss function setup\n")
best_accV = -1
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
print("Loading checkpoint from epoch {} with val accuracy of {}".format(checkpoint['epoch'], checkpoint['best_accV']))
args.start_epoch = checkpoint['epoch']
best_accV = checkpoint['best_accV']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})\n"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'\n".format(args.resume))
if args.test:
print("Initializing testing dataloaders")
test_loader, test_batches, sampler = get_loader(args)
        tst_samples_per_epoch = test_batches * args.val_batchsize
        print("Test Batch size: {}\nTest batches: {}\nTest videos: {}".format(args.val_batchsize, test_batches, len(test_loader.files)))
print('Dataloaders initialized\n')
# evaluate on validation set
timeT = test(test_loader, model, args)
else:
print("Initializing training dataloaders")
train_loader, train_batches, val_loader, val_batches, sampler = get_loader(args)
trn_samples_per_epoch = train_batches * args.batchsize
val_samples_per_epoch = val_batches * args.val_batchsize
print(args.root)
print("Trn Batch size: {}\nTrn batches: {}\nTrn videos: {}\nVal Batch size: {}\nVal batches: {}\nVal videos: {}\nTrn samples per epoch: {}\nVals samples per epoch: {}".format(args.batchsize, train_batches, len(train_loader.files), args.val_batchsize, val_batches,len(val_loader.files), trn_samples_per_epoch, val_samples_per_epoch))
print('Dataloaders initialized\n')
for epoch in range(args.start_epoch, args.epochs):
_start = time.time()
scheduler.step()
writer.add_scalar('Learning Rate', optimizer.param_groups[0]["lr"], epoch)
# train for one epoch
lossT, accT, timeT = train(train_loader, model, criterion, optimizer, epoch, writer, args)
writer.add_scalar('Loss/Training-Avg', lossT, epoch)
writer.add_scalar('Accuracy/Training', accT, epoch)
writer.add_scalar('Time/Training-Avg', timeT, epoch)
print("Epoch {} training completed: {}".format(epoch, datetime.datetime.now().isoformat()))
print("Train time {}".format(timeT))
time.sleep(1)
# evaluate on validation set
lossV, accV, timeV = validate(val_loader, model, criterion, args, epoch)
writer.add_scalar('Loss/Validation-Avg', lossV, epoch)
writer.add_scalar('Accuracy/Validation', accV, epoch)
writer.add_scalar('Time/Validation-Avg', timeV, epoch)
print("Epoch {} validation completed: {}".format(epoch, datetime.datetime.now().isoformat()))
print("Val time {}".format(timeV))
# remember best acc@1 and save checkpoint
is_best = accV > best_accV
best_accV = max(accV, best_accV)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_accV': best_accV,
'accV' : accV,
'accT' : accT,
'optimizer': optimizer.state_dict(),
}, is_best, filename='checkpoint_{}.pth.tar'.format(epoch), dir=os.path.join(args.output, "weights"))
_end = time.time()
print("Epoch {}\n\tTime: {} seconds\n\tTrain Loss: {}\n\tTrain Accuracy: {}\n\tValidation Loss: {}\n\tValidation Accuracy: {}\n".format(epoch, _end-_start, lossT, accT, lossV, accV))
print("Train time {}\nVal time {}".format(timeT, timeV))
def train(train_loader, model, criterion, optimizer, epoch, writer, args):
"""
Takes the network and hyperparameters and trains the network through an iteration of the train data
Input:
train_loader: Dataloader for training data
model: CNN model
criterion: Loss function
optimizer: Model optimizer function
epoch: The current epoch
        writer: Tensorboard writer
        args: General script arguments
    Output:
        losses.avg: Average loss value
        top1.avg: Average top-1 accuracy
        batch_time.avg: Average processing time per batch in seconds
"""
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to train mode
model.train()
n_batches = len(train_loader)
end = time.time()
for i, inputs in enumerate(train_loader):
target = [x[0] for x in inputs['labels']]
input = inputs['input'] # Output shape [batchsize, channels, numFrames, height, width]
if args.num_classes == 1:
target = torch.FloatTensor(target).view(-1, 1)
else:
target = torch.LongTensor(target).view(-1,)
# measure data loading time
data_time.update(time.time() - end)
# zero the parameter gradients
optimizer.zero_grad()
# compute output
output = model(Variable(input))
loss = criterion(output, Variable(target).cuda())
# compute gradient and do optimizer step
loss.backward()
optimizer.step()
# measure accuracy and record loss
losses.update(loss.item(), input.size(0))
acc = accuracy(output, Variable(target).cuda())
top1.update(acc, input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
writer.add_scalar('Loss/Training', loss.item(), epoch*n_batches+i)
writer.add_scalar('Time/Training', batch_time.val, epoch*n_batches+i)
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1))
return losses.avg, top1.avg, batch_time.avg
def validate(val_loader, model, criterion, args, epoch):
"""
Takes the network and hyperparameters and validates the network through an iteration of the validation data
    The predictions are saved in a csv file in a folder 'val_predictions'
Input:
val_loader: Dataloader for validation data
model: CNN model
criterion: Loss function
args: General script arguments
epoch: The current epoch
Output:
losses.avg: Average loss value
top1.avg: Average top-1 accuracy
        batch_time.avg: Average processing time per batch in seconds
"""
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, inputs in enumerate(val_loader):
target_full = inputs['labels']
input = inputs['input'] # Output shape [batchsize, channels, numFrames, height, width]
batchsize, _, _, height, width = input.shape
input = input[:,:,:,:,:width-2]
input = Variable(input)
target = [x[0] for x in target_full]
if args.num_classes == 1:
target = torch.FloatTensor(target).view(-1, 1)
else:
target = torch.LongTensor(target).view(-1,)
# Compute Output
if args.FCN:
#Fully Convolutional approach
output = model(input)
else:
#Center crop approach
c_w = width//2
c_h = height//2
h_w = args.crop_size[0]//2
h_h = args.crop_size[1]//2
output = model(input[:,:,:,c_h-h_h:c_h+h_h,c_w-h_w:c_w+h_w])
loss = criterion(output, Variable(target).cuda())
# measure accuracy and record loss
losses.update(loss.item(), batchsize)
acc = accuracy(output, Variable(target).cuda())
top1.update(acc, batchsize)
output = output.data
pred = np.argmax(output,1)
with open("./val_predictions/predictions_{}.csv".format(epoch), "a") as output_file:
for j in range(len(target)):
output_file.write("{};{};{};{}\n".format(pred[j],output[j][pred[j]], target_full[j][1], target_full[j][2]))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Validation: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1))
return losses.avg, top1.avg, batch_time.avg
def test(test_loader, model, args):
"""
Takes the network and hyperparameters and tests the network on the test data
    The predictions are saved in a csv file in a folder 'test_predictions'
Input:
test_loader: Dataloader for testing data
model: CNN model
args: General script arguments
Output:
        batch_time.avg: Average processing time per batch in seconds
"""
batch_time = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, inputs in enumerate(test_loader):
target = inputs['labels']
input = inputs['input'] # Output shape [batchsize, channels, numFrames, height, width]
batchsize, _, _, height, width = input.shape
input = input[:,:,:,:,:width-2]
input = Variable(input)
# Compute Output
if args.FCN:
#Fully Convolutional approach
output = model(input)
else:
#Center crop approach
c_w = width//2
c_h = height//2
h_w = args.crop_size[0]//2
h_h = args.crop_size[1]//2
output = model(input[:,:,:,c_h-h_h:c_h+h_h,c_w-h_w:c_w+h_w])
output = output.data
pred = np.argmax(output,1)
with open("./test_predictions/predictions.csv", "a") as output_file:
for j in range(len(target)):
output_file.write("{};{};{};{}\n".format(pred[j],output[j][pred[j]], target[j][0], target[j][1]))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
i, len(test_loader), batch_time=batch_time))
return batch_time.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', dir="./weights"):
"""
Saves the current state of the network.
Input:
        state: Dict of the model and other information which should be saved
is_best: Boolean indicating if this is the best performance so far
filename: Filename for the output pth.tar file
dir: Path to the output directory
"""
filename = os.path.join(dir, filename)
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, os.path.join(dir, 'model_best.pth.tar'))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target):
"""Computes the accuracy"""
target = target.type(torch.cuda.LongTensor).view(-1,)
with torch.no_grad():
_, predicted = torch.max(output, 1)
total = target.size(0)
correct = predicted.eq(target).sum().item()
res = correct / total
return res
def load_model(args):
if "c3d" in args.arch:
model = models.__dict__[args.arch](num_classes=args.num_classes)
else:
raise ValueError("Supplied architecture {} is not supported".format(args.arch))
print(str(model) + "\n")
print("Total parameter count: {}".format(sum(p.numel() for p in model.parameters())))
print("Trainable parameter count: {}".format(sum(p.numel() for p in model.parameters() if p.requires_grad)))
return model
def systemInfo():
from subprocess import call
print('__Python VERSION: {}'.format(sys.version))
print('__pyTorch VERSION: {}'.format(torch.__version__))
print('__CUDA VERSION:')
call(["nvcc", "--version"])
print('__CUDNN VERSION: {}'.format(torch.backends.cudnn.version()))
print('__Number CUDA Devices: {}'.format(torch.cuda.device_count()))
print('__Devices')
call(["nvidia-smi", "--format=csv", "--query-gpu=index,name,driver_version,memory.total,memory.used,memory.free"])
print('Active CUDA Device: GPU {}'.format(torch.cuda.current_device()))
print('Available devices: {}'.format(torch.cuda.device_count()))
print('Current cuda device: {}'.format(torch.cuda.current_device()))
print()
if __name__ == "__main__":
main(parser.parse_args())
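# Hypothetical invocation sketch (the script name "train.py" and the data paths
# are assumptions, not taken from the original repository):
#
#   python train.py --root /data/3DCNN/ --label_json labels_2hourlaserbinary.json \
#       --frames 16 --is_cropped --crop_size 112 112 --shuffle --normalized \
#       --batchsize 10 --val_batchsize 4 --lr 0.1 --arch c3d --num_classes 2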
|
import string
from argparse import ArgumentParser
from pathlib import Path
import pytest
from espnet2.bin.tts_inference import Text2Speech, get_parser, main
from espnet2.tasks.tts import TTSTask
def test_get_parser():
assert isinstance(get_parser(), ArgumentParser)
def test_main():
with pytest.raises(SystemExit):
main()
@pytest.fixture()
def token_list(tmp_path: Path):
with (tmp_path / "tokens.txt").open("w") as f:
f.write("<blank>\n")
for c in string.ascii_letters:
f.write(f"{c}\n")
f.write("<unk>\n")
f.write("<sos/eos>\n")
return tmp_path / "tokens.txt"
@pytest.fixture()
def config_file(tmp_path: Path, token_list):
# Write default configuration file
TTSTask.main(
cmd=[
"--dry_run",
"true",
"--output_dir",
str(tmp_path),
"--token_list",
str(token_list),
"--token_type",
"char",
"--cleaner",
"none",
"--g2p",
"none",
"--normalize",
"none",
]
)
return tmp_path / "config.yaml"
@pytest.mark.execution_timeout(5)
def test_Text2Speech(config_file):
text2speech = Text2Speech(train_config=config_file)
text = "aiueo"
text2speech(text)
|
# coding=utf-8
try:
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_001 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_002 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_003 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_004 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_005 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_006 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_007 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_008 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_009 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_010 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_011 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_012 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_013 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_014 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_015 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_016 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_017 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_018 import *
except ImportError as e:
print(e)
|
# -*- coding: utf-8 -*-
from .context import ENDPOINT
from .helpers import Request
# api = OKExAPI(apikey, secret)
# api.post('future_userinfo', params=None)
# api.get('userinfo', params=None)
class OKExAPI(object):
"""
    Base class wrapping the OKEx v1 REST API.
"""
def __init__(self, apikey, secret):
"""
        Constructor for the OKEx API base class.
:param apikey: String of API KEY
:param secret: String of SECRET KEY
:return: None
"""
self._request = Request(apikey=apikey, secret=secret)
def post(self, endpoint=None, *args, **kwargs):
try:
endpoint = '/api/v1/{}.do'.format(endpoint)
res = self._request.post(endpoint, kwargs)
return res
except Exception as e:
raise e
def get(self, endpoint=None, *args, **kwargs):
try:
endpoint = '/api/v1/{}.do'.format(endpoint)
res = self._request.get(endpoint, kwargs)
return res
except Exception as e:
raise e
def call(self, endpoint=None, method='get', *args, **kwargs):
try:
endpoint = '/api/v1/{}.do'.format(endpoint)
if method == 'post':
res = self._request.post(endpoint, kwargs, True)
else:
res = self._request.get(endpoint, kwargs)
return res
except Exception as e:
raise e
|
from .book_io import *
from .character_loader import *
from .constants import *
from .parser import *
|
class Solution:
def isSubsequence(self, s: str, t: str) -> bool:
        count = 0
        if s == "":
            return True
        if t == "":
            return False
        for i in range(len(t)):
            if count < len(s) and s[count] == t[i]:
                count += 1
        if count < len(s):
            return False
        return True
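# Hedged usage sketch (not part of the original solution): spot checks of the
# single-pass subsequence scan above.
if __name__ == "__main__":
    sol = Solution()
    assert sol.isSubsequence("abc", "ahbgdc") is True
    assert sol.isSubsequence("axc", "ahbgdc") is False
    print("isSubsequence spot checks passed")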
|
def solution(n, k, l):
l = sorted(l)
day = 0
capacity = 0
consume = 0
for a in l:
if a > day:
consume += 1
capacity += 1
if capacity >= k:
capacity = 0
day += 1
return consume
numOfTests = int(input())
for i in range(numOfTests):
N, K = tuple(map(lambda x: int(x), input().split()))
data = map(lambda x: int(x), input().split())
answer = solution(N, K, data)
print(f'Case #{i+1}: {answer}\n')
|
"""Test suite for dataset loading code."""
from os import path
import pytest
import numpy as np
import datasets
LSP_PATH = "../datasets/lsp/lsp_dataset.zip"
LSPET_PATH = "../datasets/lspet/lspet_dataset.zip"
@pytest.mark.skipif(not path.exists(LSP_PATH), reason="Need LSP .zip")
def test_lsp():
lsp = datasets.LSP(LSP_PATH)
joints = lsp.joints.locations
assert joints.shape == (2000, 14, 3)
# Should load im0042.jpg (i.e. image 41 + 1)
img_42 = lsp.load_image(41)
# It's a 134 (width) * 201 (height) image, but the image is row-major
assert img_42.shape == (201, 134, 3)
# Just skip this because it's slow. It doesn't run into memory issues,
# though.
# all_images = lsp.load_all_images()
# assert len(all_images) == len(joints)
# assert all_images[41].shape == img_42.shape
@pytest.mark.skipif(not path.exists(LSPET_PATH), reason="Need LSPET .zip")
def test_lspet():
# As above, but for the larger LSPET dataset
lsp = datasets.LSPET(LSPET_PATH)
joints = lsp.joints.locations
assert joints.shape == (10000, 14, 3)
img_412 = lsp.load_image(411)
# It's 245 (width) * 371 (height) but, again, the matrix is row-major
assert img_412.shape == (371, 245, 3)
# Commented out due to memory issues :P
# all_images = lsp.load_all_images()
# assert len(all_images) == len(joints)
# assert all_images[411] == img_412
def test_split_lists():
splitted = datasets.split_items(range(113), 8)
assert sum(map(len, splitted)) == 113
assert set(map(len, splitted)) == {14, 15}
assert set(x for l in splitted for x in l) == set(range(113))
splitted = datasets.split_items(range(12), 3)
assert sum(map(len, splitted)) == 12
assert set(map(len, splitted)) == {4}
assert set(x for l in splitted for x in l) == set(range(12))
@pytest.mark.skipif(not path.exists(LSP_PATH), reason="Need LSP .zip")
def test_split_dataset():
lsp = datasets.LSP(LSP_PATH)
train, validate, test = lsp.split(3)
assert np.any(train.joints.locations != validate.joints.locations)
assert np.any(train.joints.locations != test.joints.locations)
assert np.any(validate.joints.locations != test.joints.locations)
for d in train, validate, test:
num_ids = len(d.image_ids)
assert len(d.joints.locations) == num_ids
# 3 * 666.666... = 2000
assert num_ids == 666 or num_ids == 667
|
# /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2017-2021, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Code examples for visualization APIs """
# Visualization imports
from decimal import Decimal
import torch
from torchvision import models
import aimet_common.defs
import aimet_torch.defs
import aimet_torch.utils
from aimet_common.utils import start_bokeh_server_session
from aimet_torch.compress import ModelCompressor
from aimet_torch.visualize_serialized_data import VisualizeCompression
# End of import statements
def model_compression_with_visualization(eval_func):
"""
Code example for compressing a model with a visualization url provided.
"""
visualization_url, process = start_bokeh_server_session(8002)
input_shape = (1, 3, 224, 224)
model = models.resnet18(pretrained=True).to(torch.device('cuda'))
modules_to_ignore = [model.conv1]
greedy_params = aimet_common.defs.GreedySelectionParameters(target_comp_ratio=Decimal(0.65),
num_comp_ratio_candidates=10,
saved_eval_scores_dict=
'../data/resnet18_eval_scores.pkl')
auto_params = aimet_torch.defs.SpatialSvdParameters.AutoModeParams(greedy_params,
modules_to_ignore=modules_to_ignore)
params = aimet_torch.defs.SpatialSvdParameters(aimet_torch.defs.SpatialSvdParameters.Mode.auto, auto_params,
multiplicity=8)
    # If no visualization URL is provided, no visualizations are published during model compression.
ModelCompressor.compress_model(model=model, eval_callback=eval_func, eval_iterations=5,
input_shape=input_shape,
compress_scheme=aimet_common.defs.CompressionScheme.spatial_svd,
cost_metric=aimet_common.defs.CostMetric.mac, parameters=params,
visualization_url=None)
comp_ratios_file_path = './data/greedy_selection_comp_ratios_list.pkl'
eval_scores_path = '../data/resnet18_eval_scores.pkl'
# A user can visualize the eval scores dictionary and optimal compression ratios by executing the following code.
compression_visualizations = VisualizeCompression(visualization_url)
compression_visualizations.display_eval_scores(eval_scores_path)
    compression_visualizations.display_comp_ratio_plot(comp_ratios_file_path)
|
# encoding: utf-8
'''
The system_info table and SystemInfo mapped class store runtime-editable
configuration options.
For more details, check :doc:`maintaining/configuration`.
'''
from sqlalchemy import types, Column, Table
from six import text_type
from ckan.model import meta
from ckan.model import core
from ckan.model import domain_object
__all__ = ['system_info_table', 'SystemInfo',
'get_system_info', 'set_system_info']
system_info_table = Table(
'system_info', meta.metadata,
Column('id', types.Integer(), primary_key=True, nullable=False),
Column('key', types.Unicode(100), unique=True, nullable=False),
Column('value', types.UnicodeText),
Column('state', types.UnicodeText, default=core.State.ACTIVE),
)
class SystemInfo(core.StatefulObjectMixin,
domain_object.DomainObject):
def __init__(self, key, value):
super(SystemInfo, self).__init__()
self.key = key
self.value = text_type(value)
meta.mapper(SystemInfo, system_info_table)
def get_system_info(key, default=None):
''' get data from system_info table '''
from sqlalchemy.exc import ProgrammingError
try:
obj = meta.Session.query(SystemInfo).filter_by(key=key).first()
if obj:
return obj.value
except ProgrammingError:
meta.Session.rollback()
return default
def delete_system_info(key, default=None):
''' delete data from system_info table '''
obj = meta.Session.query(SystemInfo).filter_by(key=key).first()
if obj:
meta.Session.delete(obj)
meta.Session.commit()
def set_system_info(key, value):
''' save data in the system_info table '''
obj = None
obj = meta.Session.query(SystemInfo).filter_by(key=key).first()
if obj and obj.value == text_type(value):
return
if not obj:
obj = SystemInfo(key, value)
else:
obj.value = text_type(value)
meta.Session.add(obj)
meta.Session.commit()
return True
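# Example usage (sketch; assumes an initialised CKAN database session and a
# hypothetical key):
#
#   set_system_info('ckan.site_title', 'My Portal')
#   get_system_info('ckan.site_title')          # -> 'My Portal'
#   get_system_info('missing.key', 'fallback')  # -> 'fallback'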
|
import pygame as pg
import math
# Screen Surface & Constants
pg.init()
WIDTH = 1024
HEIGHT = 850
WHITE = (255,255,255)
BLACK = (0,0,0)
PINK = (200,0,100)
RED = (240,0,0)
ORANGE = (255, 153, 0)
BLUE = (0,0,255)
GREEN = (0,255,0)
LGREEN = (30,130,100)
screen = pg.display.set_mode((WIDTH,HEIGHT))
# Sun
class Star:
def __init__(self, coords, size):
self.coords = coords #(x,y) tuple of self coordinates
self.size = size #size (radius) of this planet
def draw(self):
pg.draw.circle(screen, ORANGE, self.coords, self.size)
# Planet
class Planet:
instances = []
def __init__(self, Ocoords, Oradius, velocity, size, col, distLine):
self.__class__.instances.append(self)
self.Ocoords = Ocoords #(x,y) tuple of the coordinates this planet orbits
self.Oradius = Oradius #radius around which the planet orbits
self.velocity = velocity #speed at which it orbits
self.size = size #size (radius) of this planet
self.col = col #color of planet
self.distLine = distLine #bool, show distance lines to other planets
self.x, self.y = (Ocoords[0]-Oradius, Ocoords[1]-Oradius)
self.coords = (self.x, self.y)
self.angle = 0
def motion(self):
self.angle += 0.001*self.velocity
self.x = int(math.cos(self.angle) * self.Oradius) + self.Ocoords[0]
self.y = int(math.sin(self.angle) * self.Oradius) + self.Ocoords[1]
self.coords = (self.x, self.y)
def draw(self):
pg.draw.circle(screen, self.col, self.coords, self.size, 0)
def draw_orbit(self):
pg.draw.circle(screen, WHITE, self.Ocoords, self.Oradius, 1)
        if self.distLine:
            # Work on a copy so the shared class-level registry is not mutated
            otherPlanets = [p for p in Planet.instances if p is not self]
            if otherPlanets:
                distlist = [math.hypot(p.x - self.x, p.y - self.y) for p in otherPlanets]
                ex, ey = otherPlanets[distlist.index(min(distlist))].coords
                pg.draw.line(screen, WHITE, (self.x, self.y), (ex, ey), 1)
def rend(self):
self.motion()
self.draw_orbit()
self.draw()
SUN = Star((round(WIDTH/2), round(HEIGHT/2)), 20)
EARTH = Planet(SUN.coords, 100, 3, 12, BLUE, True)
MARS = Planet(SUN.coords, 200, 4, 10, RED, False)
SATT = Planet(SUN.coords, 300, 5, 8, PINK, False)
SATT2 = Planet(SUN.coords, 400, 6, 10, LGREEN, False)
# Game Loop
running = True
while running:
screen.fill(BLACK)
SUN.draw()
EARTH.rend()
MARS.rend()
SATT.rend()
SATT2.rend()
pg.display.update()
# Quit Game:
for event in pg.event.get():
if event.type == pg.QUIT:
running = False
if event.type == pg.KEYDOWN:
if event.key == pg.K_ESCAPE:
running = False
|
import psycopg2
from psycopg2 import pool
class Database:
_connection_pool = None
@staticmethod
def initialise(**kwargs):
Database._connection_pool = pool.SimpleConnectionPool(1, 10, **kwargs)
@staticmethod
def get_connection():
return Database._connection_pool.getconn()
@staticmethod
    def return_connection(connection: psycopg2.extensions.connection):
Database._connection_pool.putconn(connection)
@staticmethod
def close_all_connections():
Database._connection_pool.closeall()
class CursorFromConnectionPool:
def __init__(self):
self.conn = None
self.cursor = None
def __enter__(self):
self.conn = Database.get_connection()
self.cursor = self.conn.cursor()
return self.cursor
def __exit__(self, exception_type, exception_value, exception_traceback):
if exception_value:
self.conn.rollback()
else:
self.cursor.close()
self.conn.commit()
Database.return_connection(self.conn)
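# Example usage (sketch; the connection parameters below are placeholders):
#
#   Database.initialise(database='example_db', user='postgres',
#                       password='secret', host='localhost')
#   with CursorFromConnectionPool() as cursor:
#       cursor.execute('SELECT version()')
#       print(cursor.fetchone())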
|
description_short = "Interact with google contacts"
keywords = [
"google",
"contacts",
"python",
]
|
from __future__ import annotations
import json
from dbus_next import Variant
from dbus_next.aio import MessageBus
from dbus_next.introspection import Node
from .service import StatusService
notifications_xml = '''
<node>
<interface name="org.freedesktop.Notifications">
<method name="GetCapabilities">
<arg type="as" name="capabilities" direction="out">
</arg>
</method>
<method name="Notify">
<arg type="s" name="app_name" direction="in">
</arg>
<arg type="u" name="replaces_id" direction="in">
</arg>
<arg type="s" name="app_icon" direction="in">
</arg>
<arg type="s" name="summary" direction="in">
</arg>
<arg type="s" name="body" direction="in">
</arg>
<arg type="as" name="actions" direction="in">
</arg>
<arg type="a{sv}" name="hints" direction="in">
</arg>
<arg type="i" name="expire_timeout" direction="in">
</arg>
<arg type="u" name="id" direction="out">
</arg>
</method>
<method name="CloseNotification">
<arg type="u" name="id" direction="in">
</arg>
</method>
<method name="GetServerInformation">
<arg type="s" name="name" direction="out">
</arg>
<arg type="s" name="vendor" direction="out">
</arg>
<arg type="s" name="version" direction="out">
</arg>
<arg type="s" name="spec_version" direction="out">
</arg>
</method>
<signal name="NotificationClosed">
<arg type="u" name="id">
</arg>
<arg type="u" name="reason">
</arg>
</signal>
<signal name="ActionInvoked">
<arg type="u" name="id">
</arg>
<arg type="s" name="action_key">
</arg>
</signal>
</interface>
</node>
'''
class Block:
    def __init__(self, name):
        self.name = name
        self.notifications = None
        self.i3dstatus = None
async def connect(self) -> Block:
bus = await MessageBus().connect()
obj = bus.get_proxy_object('org.freedesktop.Notifications',
'/org/freedesktop/Notifications', notifications_xml)
self.notifications = obj.get_interface('org.freedesktop.Notifications')
obj = bus.get_proxy_object('com.dubstepdish.i3dstatus', '/com/dubstepdish/i3dstatus',
Node(interfaces=[StatusService().introspect()]))
self.i3dstatus = obj.get_interface('com.dubstepdish.i3dstatus')
config_json = await self.i3dstatus.call_get_config(self.name)
self.config = json.loads(config_json)
return self
@staticmethod
def expand_template(text, context):
if not context:
return text
for key in sorted(context.keys(), key=lambda k: len(k), reverse=True):
text = text.replace('%' + key, str(context[key]))
return text
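    # Example: expand_template('%artist - %title', {'artist': 'Foo', 'title': 'Bar'})
    # returns 'Foo - Bar'. Keys are substituted longest-first so a key that is a
    # prefix of another (e.g. 't' vs 'title') cannot clobber the longer match.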
async def clear(self, instance=None):
block = {
'name': Variant('s', self.name),
'full_text': Variant('s', ''),
}
if instance:
            block['instance'] = Variant('s', instance)
await self.i3dstatus.call_show_block(block)
async def show(self, full_text, instance=None, markup=None, context=None):
block = {
'name': Variant('s', self.name),
'full_text': Variant('s', Block.expand_template(full_text, context)),
}
if markup is True:
markup = "pango"
if markup:
block['markup'] = Variant('s', markup)
if instance:
block['instance'] = Variant('s', instance)
await self.i3dstatus.call_show_block(block)
async def notify(self, message):
if self.notifications:
# https://developer.gnome.org/notification-spec/
message = 'i3-dstatus [{generator}]: {msg}'.format(generator=self.name, msg=message)
await self.notifications.call_notify('i3dstatus', 0, '', '', message, [], {}, -1)
async def error(self, message):
# TODO error log
await self.notify(message)
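# Example usage (sketch; assumes a running i3-dstatus service and a hypothetical
# block name):
#
#   import asyncio
#
#   async def main():
#       block = await Block('clock').connect()
#       await block.show('%time', context={'time': '12:00'})
#
#   asyncio.run(main())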
|
import dataclasses
import itertools
import math
from typing import Dict, Callable, List, Optional
from flwr.server.strategy import Strategy
from tensorflow_addons.utils.types import Optimizer
from sources.experiments.experiment_metadata import ExperimentMetadata
from sources.experiments.experiment_metadata_provider_utils import ExperimentMetadataProvider
@dataclasses.dataclass
class ParameterGridResponse:
strategy_provider_list: List[Callable[[ExperimentMetadata], Strategy]]
experiment_metadata_list: List[ExperimentMetadata]
optimizer_list: List[Optimizer]
def default_suffix_provider(parameter_value_map: Dict[str, float],
log10_representation=True):
if log10_representation:
parameter_value_map = {key: math.log10(val) for key, val in parameter_value_map.items()}
return "_".join([f"{str(key)}{val:.2f}" for key, val in parameter_value_map.items()])
class ParameterGridMetadataGenerator:
def __init__(self,
parameter_value_map: Dict[str, List[float]],
strategy_provider_function: Callable[[Dict[str, float]],
Callable[[ExperimentMetadata], Strategy]],
optimizer_provider_function: Callable[[Dict[str, float]], Optimizer],
experiment_metadata_provider: ExperimentMetadataProvider,
                 custom_suffix_provider: Optional[Callable[[Dict[str, float]],
                                                           str]] = None
):
self.parameter_value_map = parameter_value_map
self.strategy_provider_function = strategy_provider_function
self.optimizer_provider_function = optimizer_provider_function
self.experiment_metadata_provider = experiment_metadata_provider
self.custom_suffix_provider = custom_suffix_provider
def generate_grid_responses(self) -> ParameterGridResponse:
order = self.parameter_value_map.keys()
pools = [self.parameter_value_map[key] for key in order]
products = itertools.product(*pools)
response = ParameterGridResponse([], [], [])
for product in products:
current_parameter_dict = {key: val for key, val in zip(order, product)}
strategy = self.strategy_provider_function(current_parameter_dict)
experiment_metadata = self.experiment_metadata_provider(**current_parameter_dict)
if self.custom_suffix_provider is not None:
experiment_metadata.custom_suffix = self.custom_suffix_provider(
current_parameter_dict
)
optimizer = self.optimizer_provider_function(current_parameter_dict)
response.strategy_provider_list.append(strategy)
response.experiment_metadata_list.append(experiment_metadata)
response.optimizer_list.append(optimizer)
return response
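# Example (sketch): with parameter_value_map = {"lr": [0.1, 0.01], "mu": [0.9]},
# generate_grid_responses() iterates the 2 cartesian combinations
# {"lr": 0.1, "mu": 0.9} and {"lr": 0.01, "mu": 0.9}, producing one strategy
# provider, one ExperimentMetadata and one optimizer per combination.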
|
from base64 import b64decode
from malwareconfig import crypto
from malwareconfig.common import Decoder
from malwareconfig.common import string_printable
class Arcom(Decoder):
decoder_name = "Arcom"
decoder__version = 1
decoder_author = "@kevthehermit"
decoder_description = "Arcom RAT Decoder"
def __init__(self):
self.config = {}
def get_config(self):
'''
This is the main entry
:return:
'''
key = "CVu3388fnek3W(3ij3fkp0930di"
file_data = self.file_info.file_data
coded_config = file_data.split(b"\x18\x12\x00\x00")[1][:-8]
decoded_config = b64decode(coded_config)
clear_config = crypto.decrypt_blowfish(key, decoded_config).decode('utf-8')
config_dict = {}
parts = clear_config.split('|')
        if len(parts) > 24:
config_dict["Domain"] = parts[0]
config_dict["Port"] = parts[1]
config_dict["Install Path"] = parts[2]
config_dict["Install Name"] = parts[3]
config_dict["Startup Key"] = parts[4]
config_dict["Campaign ID"] = parts[5]
config_dict["Mutex Main"] = parts[6]
config_dict["Mutex Per"] = parts[7]
config_dict["YPER"] = parts[8]
config_dict["YGRB"] = parts[9]
config_dict["Mutex Grabber"] = parts[10]
config_dict["Screen Rec Link"] = parts[11]
config_dict["Mutex 4"] = parts[12]
config_dict["YVID"] = parts[13]
config_dict["YIM"] = parts[14]
config_dict["NO"] = parts[15]
config_dict["Smart Broadcast"] = parts[16]
config_dict["YES"] = parts[17]
config_dict["Plugins"] = parts[18]
config_dict["Flag1"] = parts[19]
config_dict["Flag2"] = parts[20]
config_dict["Flag3"] = parts[21]
config_dict["Flag4"] = parts[22]
config_dict["WebPanel"] = parts[23]
config_dict["Remote Delay"] = parts[24]
# Set the config to the class for use
self.config = config_dict
|
import numpy as np
class PenalizationGrid:
def __init__(self, minCoef=1e-10, maxCoef=1, length=200):
self.values = np.linspace(maxCoef, minCoef, length).tolist()
def isEmpty(self) -> bool:
return len(self.values) == 0
def getNextKCoeffs(self, k):
penalizationCoeffsForBatch = self.values[:k]
del self.values[:k]
return penalizationCoeffsForBatch
def size(self):
return len(self.values)
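# Example usage (sketch):
#
#   grid = PenalizationGrid(minCoef=1e-4, maxCoef=1.0, length=8)
#   while not grid.isEmpty():
#       batch = grid.getNextKCoeffs(4)  # coefficients are ordered largest to smallest
#       ...                             # fit one model per coefficient in the batch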
|
from __future__ import print_function
from models import LipRead
import torch
import toml
from training import Trainer
from validation import Validator
print("Loading options...")
with open('options.toml', 'r') as optionsFile:
options = toml.loads(optionsFile.read())
if(options["general"]["usecudnnbenchmark"] and options["general"]["usecudnn"]):
print("Running cudnn benchmark...")
torch.backends.cudnn.benchmark = True
#Create the model.
model = LipRead(options)
if(options["general"]["loadpretrainedmodel"]):
model.load_state_dict(torch.load(options["general"]["pretrainedmodelpath"]))
#Move the model to the GPU.
if(options["general"]["usecudnn"]):
model = model.cuda(options["general"]["gpuid"])
trainer = Trainer(options)
validator = Validator(options)
for epoch in range(options["training"]["startepoch"], options["training"]["epochs"]):
if(options["training"]["train"]):
trainer.epoch(model, epoch)
if(options["validation"]["validate"]):
validator.epoch(model)
|
import os
import io
import re
import datetime
import itertools
import markdown as markdown_module
import pygments.formatters
import yaml
import jinja2
import werkzeug
from flask import Flask, render_template, send_from_directory, abort, url_for
app = Flask(__name__)
app.jinja_env.undefined = jinja2.StrictUndefined
app.jinja_env.globals['today'] = datetime.date.today
# The atom.xml template uses url_for(..., _external=True)
app.config['FREEZER_BASE_URL'] = 'https://exyr.org/'
PYGMENTS_CSS = (pygments.formatters.HtmlFormatter(style='tango')
.get_style_defs('.codehilite'))
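# Instantiating Markdown with the 'fenced_code' extension imports
# markdown_module.extensions.fenced_code so its preprocessor can be patched below.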
markdown_module.Markdown(extensions=['fenced_code'])
Fenced = markdown_module.extensions.fenced_code.FencedBlockPreprocessor
Fenced.FENCED_BLOCK_RE = re.compile(
Fenced.FENCED_BLOCK_RE.pattern.replace("and lang", "and lang\n(,\w+[ ]*)?"),
Fenced.FENCED_BLOCK_RE.flags,
)
@app.template_filter()
def markdown(text):
return markdown_module.markdown(
text,
['codehilite', 'footnotes', 'fenced_code'] + 2 * ['downheader'],
extension_configs={'codehilite': {'linenums': False}},
)
class Page(object):
root = os.path.join(app.root_path, u'pages')
suffix = '.markdown'
_cache = {}
@classmethod
def load(cls, year, name):
filename = os.path.join(cls.root, year, name) + cls.suffix
if not os.path.isfile(filename):
abort(404)
mtime = os.path.getmtime(filename)
page, old_mtime = cls._cache.get(filename, (None, None))
if not page or mtime != old_mtime:
with io.open(filename, encoding='utf8') as fd:
head = ''.join(itertools.takewhile(lambda x: x.strip(), fd))
body = fd.read()
page = cls(year, name, head, body)
cls._cache[filename] = (page, mtime)
return page
@classmethod
def years(cls):
for year in os.listdir(cls.root):
if year.isdigit():
yield year
@classmethod
def articles_by_year(cls, year):
directory = os.path.join(cls.root, year)
if not os.path.isdir(directory):
abort(404)
for name in os.listdir(directory):
if name.endswith(cls.suffix):
page = cls.load(year, name[:-len(cls.suffix)])
if app.config.get('EXYR_SHOW_DRAFTS') or not page.meta.get('draft'):
yield page
@classmethod
def all_articles(cls):
for year in cls.years():
for article in cls.articles_by_year(year):
yield article
def __init__(self, year, name, head, body):
self.year = year
self.name = name
self.head = head
self.body = body
@werkzeug.cached_property
def meta(self):
return yaml.safe_load(self.head) or {}
def __getitem__(self, name):
return self.meta[name]
@werkzeug.cached_property
def html(self):
return markdown(self.body)
def url(self, **kwargs):
return url_for(
'article', year=int(self.year), name=self.name, **kwargs)
def updated(self):
return self.meta.get('modified', self['published'])
@app.route('/.htaccess')
def htaccess():
return '''
RedirectMatch /tags(/.*)? /
RedirectMatch /(\d+)/?$ /#$1
RedirectMatch /2013/enumerated-types-python/slides.pdf /2013/algebraic-sum-types-python/slides.pdf
RedirectMatch /2011/Poor-man-NTP/ /2011/low-tech-ntp/
RedirectMatch /about/ /
Redirect gone /2011/hashing-passwords/
Redirect gone /2012/csswg-invited-expert/
Redirect gone /2011/git-mirrors/
Redirect gone /2011/virtualenv-HOWTO/
Redirect gone /2011/gedit-plugins-packaged/
Redirect gone /2012/weasyprint-is-bsd/
ErrorDocument 410 /gone.html
''', 200, {'Content-Type': 'application/octet-stream'}
@app.route('/gone.html')
def gone():
return '''
<title>410 Gone</title>
<h1>Gone</h1>
Some things are not meant to stay.
'''
@app.route('/')
def home():
return render_template(
'all_posts.html',
posts_by_year=itertools.groupby(
sorted(
Page.all_articles(),
reverse=True,
key=lambda p: p['published'],
),
key=lambda p: p['published'].year,
),
about=Page.load('', 'about')
)
@app.route('/<int:year>/<name>/')
def article(year, name):
return render_template('flatpage.html', page=Page.load(str(year), name))
@app.route('/<int:year>/<name>/<path:path>')
def static_in_pages(year, name, path):
return send_from_directory(Page.root, '%i/%s/%s' % (year, name, path))
@app.route('/feed.atom')
def feed():
articles = sorted(Page.all_articles(), key=lambda a: a.updated())
feed_updated = articles[0].updated()
xml = render_template('atom.xml', **locals())
return app.response_class(xml, mimetype='application/atom+xml')
def minify_css(css):
# Remove comments. *? is the non-greedy version of *
css = re.sub(r'/\*.*?\*/', '', css)
# Remove redundant whitespace
css = re.sub(r'\s+', ' ', css)
# Put back line breaks after block so that it's not just one huge line
css = re.sub(r'} ?', '}\n', css)
return css
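# Example: minify_css("p {\n    margin: 0;  /* reset */\n}")
# returns roughly "p { margin: 0; }\n".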
@app.route('/style.css')
def stylesheet():
css = render_template('style.css', pygments_css=PYGMENTS_CSS)
css = minify_css(css)
# Add this after minification, would be removed otherwise.
css = (
'/*\nNon-minified version is at\n'
'https://github.com/SimonSapin/exyr.org'
'/blob/master/exyr/templates/style.css\n*/\n'
+ css
)
return app.response_class(css, mimetype='text/css')
@app.errorhandler(404)
def not_found(e):
return render_template('404.html')
|
import logging
import salt.exceptions
import salt_more
from datetime import datetime
log = logging.getLogger(__name__)
def help():
"""
Shows this help information.
"""
return __salt__["sys.doc"]("power")
def status():
"""
Get status and debug information regarding power management.
"""
ret = {
"spm": {},
"stn": {},
"rpi": {},
}
# SPM status
res = __salt__["spm.query"]("status")
ret["spm"].update({k: v for k, v in res.iteritems() if not k.startswith("_")})
# SPM sleep interval
res = __salt__["spm.query"]("sleep_interval")
ret["spm"]["sleep_interval"] = res["value"]
# SPM version
res = __salt__["spm.query"]("version")
ret["spm"]["version"] = res["value"]
# STN config
res = __salt__["stn.power_config"]()
ret["stn"]["trigger_config"] = {
k: v.split(",")[1] for k, v in res.iteritems() \
if (k.endswith("_wake") or k.endswith("_sleep")) and v.startswith("ON")
}
# STN trigger
res = __salt__["stn.power_trigger_status"]()
ret["stn"]["last_trigger"] = {
k: v for k, v in res.iteritems() if not k.startswith("_")
}
# STN battery
res = __salt__["obd.battery"]()
ret["stn"]["battery"] = {
k: v for k, v in res.iteritems() if not k.startswith("_")
}
# RPI uptime
res = __salt__["status.uptime"]()
ret["rpi"]["uptime"] = res
return ret
def sleep(interval=60, delay=10, modem_off=False, acc_off=False, confirm=False, reason="unknown", allow_auto_update=True):
"""
Power down system and put device into sleep state.
Optional arguments:
- interval (int): Sleep interval in seconds. Default is '60'.
    - delay (int): Delay in seconds before powering down. Default is '10'.
- modem_off (bool): Power off 3V3 supply to modem on mPCIe slot. Default is 'False'.
- acc_off (bool): Put accelerometer into standby. Default is 'False'.
- confirm (bool): Acknowledge the execution of this command. Default is 'False'.
- reason (str): Reason code that tells why we decided to sleep. Default is 'unknown'.
"""
if __salt__["saltutil.is_running"]("power.sleep"):
raise salt.exceptions.CommandExecutionError("Sleep is already running")
if not confirm:
raise salt.exceptions.CommandExecutionError(
"This command will power down the system - add parameter 'confirm=true' to continue anyway")
ret = {}
log.info("Preparing to sleep {:} in {:} second(s)".format(
"{:} second(s)".format(interval) if interval > 0 else "infinite",
delay))
# First set SPM sleep interval
try:
__salt__["spm.query"]("sleep_interval", value=interval)
except:
log.exception("Failed to set sleep interval")
interval = 0 # Assume interval is unset
# Run shutdown SLS
try:
__salt__["minionutil.run_job"]("state.sls", "shutdown", pillar={"allow_auto_update": allow_auto_update}, _timeout=600)
except:
log.exception("Failed to run shutdown SLS")
# Kill heartbeat worker thread to enforce RPi power off if something goes south/hangs
try:
res = __salt__["spm.manage"]("worker", "kill", "_heartbeat")
if not "_heartbeat" in res.get("killed", []):
log.warn("No heartbeat worker thread found to kill")
except:
log.exception("Failed to kill heartbeat worker")
# TODO: Power off audio amp (if not done when shutting down RPi?)
# Power off 3V3 for modem/mPCIe if requested
#if modem_off:
# __salt__["spm.query"]("stop_3v3")
# Set accelerometer in standby mode
if acc_off:
try:
__salt__["acc.query"]("active", value=False)
except:
log.exception("Failed to put accelerometer into standby mode")
# Plan a system shutdown after 1 minute in case STN never sleeps
# (it could get interrupted by another STN wake trigger)
try:
__salt__["system.shutdown"](1)
except:
log.exception("Failed to plan system shutdown")
# Put STN to sleep (and thereby shutdown RPi when STN power pin goes low)
__salt__["stn.sleep"](delay)
if interval > 0:
log.warn("Intentionally going to sleep for {:} second(s) because of reason '{:}'".format(interval, reason))
else:
log.warn("Intentionally going to hibernate until next engine start because of reason '{:}'".format(reason))
# Fire a sleep or hibernate event
__salt__["event.fire"]({
"delay": delay,
"interval": interval,
"reason": reason,
"uptime": __salt__["status.uptime"]()["seconds"]
},
"system/power/{:}".format("sleep" if interval > 0 else "hibernate")
)
ret["comment"] = "Planned shutdown in {:d} second(s)".format(delay)
ret["result"] = True
return ret
def hibernate(delay=10, confirm=False, reason="unknown", allow_auto_update=True):
"""
Power down system and put device into hibernate state.
Optional arguments:
    - delay (int): Delay in seconds before powering down. Default is '10'.
- confirm (bool): Acknowledge the execution of this command. Default is 'False'.
- reason (str): Reason code that tells why we decided to hibernate. Default is 'unknown'.
"""
return sleep(interval=0, delay=delay, acc_off=True, confirm=confirm, reason=reason, allow_auto_update=allow_auto_update)
def sleep_timer(enable=None, period=1800, **kwargs):
"""
Setup sleep timer to schedule power off upon inactivity.
NOTE: Do not access pillar data in this function as they will not be available when called from engines (separate processes).
Optional arguments:
- enable (bool): Enable or disable timer.
- period (int): Timer period in seconds before performing sleep. Default is '1800'.
- reason (str): Reason code that tells why we decided to sleep. Default is 'unknown'.
"""
# Helper function to get all sleep timers
def timers():
res = __salt__["schedule.list"](return_yaml=False)
return {k: v for k, v in res.iteritems() if k.startswith("_sleep_timer")}
if enable == True:
name = "_sleep_timer/{:}".format(kwargs.get("reason", "unknown"))
# Always try to delete existing timer
res = __salt__["schedule.delete"](name)
# Prepare keyword arguments
kwargs = salt_more.clean_kwargs(kwargs) # Clean up unwanted entries
kwargs["confirm"] = True # Ensure confirm is set
now = datetime.utcnow()
# Add fresh timer
res = __salt__["schedule.add"](name,
function="power.sleep",
job_kwargs=kwargs,
seconds=period,
maxrunning=1,
return_job=False, # Do not return info to master upon job completion
persist=False, # Do not persist schedule (actually this is useless because all schedules might be persisted when modified later on)
metadata={
"created": now.isoformat(),
"transient": True # Enforce schedule is never persisted on disk and thereby not surviving minion restarts (see patch 'salt/utils/schedule.py.patch')
})
elif enable == False:
# Delete all existing timers
for name in timers():
res = __salt__["schedule.delete"](name)
# Return all existing timer(s)
return timers()
def reboot(reason="unknown"):
"""
Reboot system immediately.
Optional arguments:
- reason (str): Reason code that tells why we decided to reboot. Default is 'unknown'.
"""
return request_reboot(immediately=True, reason=reason)
def request_reboot(pending=True, immediately=False, reason="unknown"):
"""
Request for a future system reboot.
Optional arguments:
- pending (bool): Default is 'True'.
- immediately (bool): Default is 'False'.
- reason (str): Reason code that tells why we decided to reboot. Default is 'unknown'.
"""
if pending or __context__.get("power.request_reboot", False):
if immediately:
log.warn("Performing system reboot immediately because of reason '{:}'".format(reason))
# Fire a reboot event
__salt__["event.fire"]({
"reason": reason,
"uptime": __salt__["status.uptime"]()["seconds"]
},
"system/power/reboot"
)
# TODO: Delay reboot 10 secs to allow cloud upload of above event
            # Ensure a heartbeat has just been sent to prevent heartbeat timeout during reboot
__salt__["spm.query"]("noop")
# Perform reboot
return __salt__["system.reboot"]()
else:
log.info("Request for system reboot is pending because of reason '{:}'".format(reason))
else:
log.debug("No pending system reboot request")
# Set pending in context
__context__["power.request_reboot"] = pending
return {
"pending": pending,
}
def restart_modem():
"""
Restart modem the hard way by stopping and starting its power supply.
"""
    # TODO: We also need to close all open serial connections to modem to prevent system freeze
return __salt__["spm.query"]("restart_3v3")
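# Example usage from the Salt CLI (sketch; the minion id is a placeholder):
#
#   salt 'my-minion' power.status
#   salt 'my-minion' power.sleep interval=120 delay=10 confirm=true reason=manual
#   salt 'my-minion' power.hibernate confirm=true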
|
from dataclasses import dataclass
from omegaconf.omegaconf import MISSING
import torch
from torch.functional import Tensor
import torch.nn as nn
import torch.nn.functional as F
from rnnms.networks.vocoder import ConfRNNMSVocoder, RNNMSVocoder
@dataclass
class ConfVocoder:
"""
Args:
size_i_codebook: Size of input discrete codebook
dim_i_embedding: Dimension of embedded input
n_speakers: Number of speakers
dim_speaker_embedding: Dimension of speaker embedding
"""
size_i_codebook: int = MISSING
dim_i_embedding: int = MISSING
n_speakers: int = MISSING
dim_speaker_embedding: int = MISSING
rnnms: ConfRNNMSVocoder = ConfRNNMSVocoder()
class Vocoder(nn.Module):
"""Independently-trained vocoder conditioned on discrete VQ-CPC output.
Network is bidirectional_PreNet + WaveRNN (=RNN_MS).
"""
def __init__(self, conf: ConfVocoder):
"""
"""
super(Vocoder, self).__init__()
# (discrete) latent_code/speaker_id => (continuous) embedding space
self.code_embedding = nn.Embedding(conf.size_i_codebook, conf.dim_i_embedding)
self.speaker_embedding = nn.Embedding(conf.n_speakers, conf.dim_speaker_embedding)
self.rnnms = RNNMSVocoder(conf.rnnms)
def forward(self, x: Tensor, z: Tensor, speaker: Tensor):
"""Forward a content representation sequence at once with teacher observation sequence for AR.
Latent code and speaker ID are embedded, upsampled x2, then go to RNNMS.
Args:
x: μ-law encoded observation sequence for AR teacher signal
z: Index series of discrete content representation for conditioning
speaker: Speaker ID (discrete value)
Returns:
Energy distribution of `bits` bit μ-law value
"""
# Content embedding and upsampling
z_embed = self.code_embedding(z)
# (Batch, Time, Embed_z) => (Batch, Embed_z, 2*Time) => (Batch, 2*Time, Embed_z)
z_embed_up: Tensor = F.interpolate(z_embed.transpose(1, 2), scale_factor=2).transpose(1, 2)
# Speaker embedding and upsampling
spk_embed: Tensor = self.speaker_embedding(speaker)
# Time-directional copy (keep Batch/dim0 & Embed/dim2 by `-1` flag)
# (Batch, Embed_spk) => (Batch, 1, Embed_spk) => (Batch, 2*Time, Embed_spk)
spk_embed_up = spk_embed.unsqueeze(1).expand(-1, z_embed_up.size(1), -1)
latent_series = torch.cat((z_embed_up, spk_embed_up), dim=-1)
return self.rnnms(x, latent_series)
def generate(self, z: Tensor, speaker: Tensor):
"""Generate utterances from a batch of (latent_code, speaker_index)
"""
z_embed = self.code_embedding(z)
z_embed_up: Tensor = F.interpolate(z_embed.transpose(1, 2), scale_factor=2).transpose(1, 2)
spk_embed = self.speaker_embedding(speaker)
spk_embed_up = spk_embed.unsqueeze(1).expand(-1, z_embed_up.size(1), -1)
z_spk_series = torch.cat((z_embed_up, spk_embed_up), dim=-1)
return self.rnnms.generate(z_spk_series)
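# Shape sketch (hypothetical config values; B = batch size, T = code length):
#
#   conf = ConfVocoder(size_i_codebook=512, dim_i_embedding=64,
#                      n_speakers=102, dim_speaker_embedding=64)
#   vocoder = Vocoder(conf)
#   z = torch.randint(512, (B, T))        # discrete content codes
#   speaker = torch.randint(102, (B,))    # speaker indices
#   wave = vocoder.generate(z, speaker)   # conditioning is upsampled to 2*T frames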
|
from os import environ
class Config:
"""Set Flask configuration vars from .env file."""
# General Config
SECRET_KEY = environ.get('SECRET_KEY')
FLASK_APP = environ.get('FLASK_APP')
FLASK_ENV = environ.get('FLASK_ENV')
DEBUG = True
# Specific Config
    MODEL_FILE = 'model.plk'
|
import numpy as np
from artemis.experiments.decorators import experiment_function
from matplotlib import pyplot as plt
from six.moves import xrange
__author__ = 'peter'
"""
This file demonstrates Artemis's "Experiments".
When you run an experiment, all figures and console output, as well as some metadata such as total run time,
arguments, etc., are saved to disk.
This demo illustrates how you can create an experiment, create variations on that experiment, and view the results.
"""
class OnlineLinearRegressor:
def __init__(self, n_in, n_out, learning_rate = 0.01):
self.w = np.zeros((n_in, n_out))
self.learning_rate = learning_rate
def train(self, x, targ): # x: (n_samples, n_in), targ: (n_samples, n_out)
y = self.predict(x)
self.w -= self.learning_rate * (x.T.dot(y-targ))
def predict(self, x): # x: (n_samples, n_in)
return x.dot(self.w)
@experiment_function
def demo_linear_regression(
n_in = 100,
n_out = 4,
n_training_samples = 500,
n_test_samples = 500,
noise = .1,
n_epochs = 10,
eta = 0.001,
random_seed = 1234,
score_report_period = 100,
):
"""
Generate a random linear regression problem and train an online predictor to solve it with Stochastic gradient descent.
Log the scores and plot the resulting learning curves.
:param n_in: Number of inputs
:param n_out: Number of outputs
:param n_training_samples: Number of training samples in generated dataset.
:param n_test_samples: Number of test samples in generated dataset.
:param noise: Noise to add to generated dataset
:param n_epochs: Number of epochs to run for
:param eta: Learning rate for SGD
:param random_seed: Random seed (for generating data)
:param score_report_period: Report score every X training iterations.
"""
# Setup data
rng = np.random.RandomState(random_seed)
w_true = rng.randn(n_in, n_out)*.1 # (n_in, n_out)
training_data = rng.randn(n_training_samples, n_in) # (n_training_samples, n_in)
training_target = training_data.dot(w_true) + noise*rng.randn(n_training_samples, n_out) # (n_training_samples, n_out)
test_data = rng.randn(n_test_samples, n_in) # (n_test_samples, n_in)
test_target = test_data.dot(w_true) + noise*rng.randn(n_test_samples, n_out) # (n_test_samples, n_out)
predictor = OnlineLinearRegressor(n_in=n_in, n_out=n_out, learning_rate=eta)
# Train and periodically record scores.
epoch_scores = []
for i in xrange(n_training_samples*n_epochs+1):
if i % score_report_period == 0:
training_out = predictor.predict(training_data)
training_cost = ((training_target-training_out)**2).sum(axis=1).mean(axis=0)
test_out = predictor.predict(test_data)
test_cost = ((test_target-test_out)**2).sum(axis=1).mean(axis=0)
print('Epoch {epoch}: Test Cost: {test}, Training Cost: {train}'.format(epoch=float(i)/n_training_samples, test=test_cost, train=training_cost))
epoch = float(i) / n_training_samples
epoch_scores.append((epoch, training_cost, test_cost))
predictor.train(training_data[[i % n_training_samples]], training_target[[i % n_training_samples]])
# Plot
epochs, training_costs, test_costs = zip(*epoch_scores)
plt.plot(epochs, np.array([training_costs, test_costs]).T)
plt.xlabel('epoch')
plt.ylabel('cost')
plt.legend(['Training Cost', 'Test Cost'])
plt.title("Learning Curve")
plt.ion()
plt.show()
return {'training_cost': training_cost, 'test_cost': test_cost}
demo_linear_regression.add_variant('fast-learn', eta=0.01)
demo_linear_regression.add_variant('large_input_space', n_in=1000)
if __name__ == "__main__":
# Open a menu that allows you to run experiments and view old ones.
demo_linear_regression.browse(display_format="flat")
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy
de = numpy.genfromtxt("total internal energy_EDRAHT.txt")
dm = numpy.genfromtxt("forces fx,fy,fz_NROT.txt")
plt.plot(de[:,0],de[:,1],'b',dm[:,0],dm[:,3],'r')
plt.grid(True)
plt.xlim([0,1])
plt.xlabel("t")
plt.ylabel("y")
plt.legend(["Energy","Moment"],loc=0)
plt.savefig("Biegung-history")
|
# coding: utf-8
"""
pollination-server
Pollination Server OpenAPI Definition # noqa: E501
The version of the OpenAPI document: 0.16.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from pollination_sdk.api_client import ApiClient
from pollination_sdk.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class ProjectsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_project(self, owner, project_create, **kwargs): # noqa: E501
"""Create a Project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_project(owner, project_create, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param project_create: (required)
:type project_create: ProjectCreate
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: CreatedContent
"""
kwargs['_return_http_data_only'] = True
return self.create_project_with_http_info(owner, project_create, **kwargs) # noqa: E501
def create_project_with_http_info(self, owner, project_create, **kwargs): # noqa: E501
"""Create a Project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_project_with_http_info(owner, project_create, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param project_create: (required)
:type project_create: ProjectCreate
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(CreatedContent, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'owner',
'project_create'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_project" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `create_project`") # noqa: E501
# verify the required parameter 'project_create' is set
if self.api_client.client_side_validation and ('project_create' not in local_var_params or # noqa: E501
local_var_params['project_create'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `project_create` when calling `create_project`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'project_create' in local_var_params:
body_params = local_var_params['project_create']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyAuth', 'JWTAuth'] # noqa: E501
return self.api_client.call_api(
'/projects/{owner}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreatedContent', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def create_project_recipe_filter(self, owner, name, project_recipe_filter, **kwargs): # noqa: E501
"""Upsert a recipe filter to a project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_project_recipe_filter(owner, name, project_recipe_filter, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param project_recipe_filter: (required)
:type project_recipe_filter: ProjectRecipeFilter
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: ProjectRecipeFilter
"""
kwargs['_return_http_data_only'] = True
return self.create_project_recipe_filter_with_http_info(owner, name, project_recipe_filter, **kwargs) # noqa: E501
def create_project_recipe_filter_with_http_info(self, owner, name, project_recipe_filter, **kwargs): # noqa: E501
"""Upsert a recipe filter to a project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_project_recipe_filter_with_http_info(owner, name, project_recipe_filter, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param project_recipe_filter: (required)
:type project_recipe_filter: ProjectRecipeFilter
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(ProjectRecipeFilter, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'owner',
'name',
'project_recipe_filter'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_project_recipe_filter" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `create_project_recipe_filter`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `create_project_recipe_filter`") # noqa: E501
# verify the required parameter 'project_recipe_filter' is set
if self.api_client.client_side_validation and ('project_recipe_filter' not in local_var_params or # noqa: E501
local_var_params['project_recipe_filter'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `project_recipe_filter` when calling `create_project_recipe_filter`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'project_recipe_filter' in local_var_params:
body_params = local_var_params['project_recipe_filter']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyAuth', 'JWTAuth'] # noqa: E501
return self.api_client.call_api(
'/projects/{owner}/{name}/recipes/filters', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProjectRecipeFilter', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def delete_project(self, owner, name, **kwargs): # noqa: E501
"""Delete a Project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_project(owner, name, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.delete_project_with_http_info(owner, name, **kwargs) # noqa: E501
def delete_project_with_http_info(self, owner, name, **kwargs): # noqa: E501
"""Delete a Project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_project_with_http_info(owner, name, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'owner',
'name'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_project" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `delete_project`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_project`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyAuth', 'JWTAuth'] # noqa: E501
return self.api_client.call_api(
'/projects/{owner}/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def delete_project_org_permission(self, owner, name, project_policy_subject, **kwargs): # noqa: E501
"""Remove a Project permissions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_project_org_permission(owner, name, project_policy_subject, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param project_policy_subject: (required)
:type project_policy_subject: ProjectPolicySubject
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.delete_project_org_permission_with_http_info(owner, name, project_policy_subject, **kwargs) # noqa: E501
def delete_project_org_permission_with_http_info(self, owner, name, project_policy_subject, **kwargs): # noqa: E501
"""Remove a Project permissions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_project_org_permission_with_http_info(owner, name, project_policy_subject, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param project_policy_subject: (required)
:type project_policy_subject: ProjectPolicySubject
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'owner',
'name',
'project_policy_subject'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_project_org_permission" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `delete_project_org_permission`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_project_org_permission`") # noqa: E501
# verify the required parameter 'project_policy_subject' is set
if self.api_client.client_side_validation and ('project_policy_subject' not in local_var_params or # noqa: E501
local_var_params['project_policy_subject'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `project_policy_subject` when calling `delete_project_org_permission`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'project_policy_subject' in local_var_params:
body_params = local_var_params['project_policy_subject']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyAuth', 'JWTAuth'] # noqa: E501
return self.api_client.call_api(
'/projects/{owner}/{name}/permissions', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def delete_project_recipe_filter(self, owner, name, project_recipe_filter, **kwargs): # noqa: E501
"""Remove a Project recipe filter # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_project_recipe_filter(owner, name, project_recipe_filter, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param project_recipe_filter: (required)
:type project_recipe_filter: ProjectRecipeFilter
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.delete_project_recipe_filter_with_http_info(owner, name, project_recipe_filter, **kwargs) # noqa: E501
def delete_project_recipe_filter_with_http_info(self, owner, name, project_recipe_filter, **kwargs): # noqa: E501
"""Remove a Project recipe filter # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_project_recipe_filter_with_http_info(owner, name, project_recipe_filter, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param project_recipe_filter: (required)
:type project_recipe_filter: ProjectRecipeFilter
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'owner',
'name',
'project_recipe_filter'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_project_recipe_filter" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `delete_project_recipe_filter`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_project_recipe_filter`") # noqa: E501
# verify the required parameter 'project_recipe_filter' is set
if self.api_client.client_side_validation and ('project_recipe_filter' not in local_var_params or # noqa: E501
local_var_params['project_recipe_filter'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `project_recipe_filter` when calling `delete_project_recipe_filter`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'project_recipe_filter' in local_var_params:
body_params = local_var_params['project_recipe_filter']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyAuth', 'JWTAuth'] # noqa: E501
return self.api_client.call_api(
'/projects/{owner}/{name}/recipes/filters', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def get_project(self, owner, name, **kwargs): # noqa: E501
"""Get a project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_project(owner, name, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: Project
"""
kwargs['_return_http_data_only'] = True
return self.get_project_with_http_info(owner, name, **kwargs) # noqa: E501
def get_project_with_http_info(self, owner, name, **kwargs): # noqa: E501
"""Get a project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_project_with_http_info(owner, name, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(Project, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'owner',
'name'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_project" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `get_project`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `get_project`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyAuth', 'JWTAuth'] # noqa: E501
return self.api_client.call_api(
'/projects/{owner}/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Project', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def get_project_access_permissions(self, owner, name, **kwargs): # noqa: E501
"""Get project access permissions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_project_access_permissions(owner, name, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param page: Page number starting from 1
:type page: int
:param per_page: Number of items per page
:type per_page: int
:param subject_type: The type of access policy subject
:type subject_type: list[str]
:param permission: An access policy permission string
:type permission: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: ProjectAccessPolicyList
"""
kwargs['_return_http_data_only'] = True
return self.get_project_access_permissions_with_http_info(owner, name, **kwargs) # noqa: E501
def get_project_access_permissions_with_http_info(self, owner, name, **kwargs): # noqa: E501
"""Get project access permissions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_project_access_permissions_with_http_info(owner, name, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param page: Page number starting from 1
:type page: int
:param per_page: Number of items per page
:type per_page: int
:param subject_type: The type of access policy subject
:type subject_type: list[str]
:param permission: An access policy permission string
:type permission: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(ProjectAccessPolicyList, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'owner',
'name',
'page',
'per_page',
'subject_type',
'permission'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_project_access_permissions" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `get_project_access_permissions`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `get_project_access_permissions`") # noqa: E501
if self.api_client.client_side_validation and 'page' in local_var_params and local_var_params['page'] < 1: # noqa: E501
raise ApiValueError("Invalid value for parameter `page` when calling `get_project_access_permissions`, must be a value greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'per_page' in local_var_params and local_var_params['per_page'] > 100: # noqa: E501
raise ApiValueError("Invalid value for parameter `per_page` when calling `get_project_access_permissions`, must be a value less than or equal to `100`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'per_page' in local_var_params and local_var_params['per_page'] is not None: # noqa: E501
query_params.append(('per-page', local_var_params['per_page'])) # noqa: E501
if 'subject_type' in local_var_params and local_var_params['subject_type'] is not None: # noqa: E501
query_params.append(('subject_type', local_var_params['subject_type'])) # noqa: E501
collection_formats['subject_type'] = 'multi' # noqa: E501
if 'permission' in local_var_params and local_var_params['permission'] is not None: # noqa: E501
query_params.append(('permission', local_var_params['permission'])) # noqa: E501
collection_formats['permission'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyAuth', 'JWTAuth'] # noqa: E501
return self.api_client.call_api(
'/projects/{owner}/{name}/permissions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProjectAccessPolicyList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def get_project_recipe_filters(self, owner, name, **kwargs): # noqa: E501
"""Get project recipe filters # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_project_recipe_filters(owner, name, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param page: Page number starting from 1
:type page: int
:param per_page: Number of items per page
:type per_page: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: ProjectRecipeFilterList
"""
kwargs['_return_http_data_only'] = True
return self.get_project_recipe_filters_with_http_info(owner, name, **kwargs) # noqa: E501
def get_project_recipe_filters_with_http_info(self, owner, name, **kwargs): # noqa: E501
"""Get project recipe filters # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_project_recipe_filters_with_http_info(owner, name, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param page: Page number starting from 1
:type page: int
:param per_page: Number of items per page
:type per_page: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(ProjectRecipeFilterList, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'owner',
'name',
'page',
'per_page'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_project_recipe_filters" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `get_project_recipe_filters`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `get_project_recipe_filters`") # noqa: E501
if self.api_client.client_side_validation and 'page' in local_var_params and local_var_params['page'] < 1: # noqa: E501
raise ApiValueError("Invalid value for parameter `page` when calling `get_project_recipe_filters`, must be a value greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'per_page' in local_var_params and local_var_params['per_page'] > 100: # noqa: E501
raise ApiValueError("Invalid value for parameter `per_page` when calling `get_project_recipe_filters`, must be a value less than or equal to `100`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'per_page' in local_var_params and local_var_params['per_page'] is not None: # noqa: E501
query_params.append(('per-page', local_var_params['per_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyAuth', 'JWTAuth'] # noqa: E501
return self.api_client.call_api(
'/projects/{owner}/{name}/recipes/filters', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProjectRecipeFilterList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def get_project_recipes(self, owner, name, **kwargs): # noqa: E501
"""Get project recipes # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_project_recipes(owner, name, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param search: Search string to find recipes
:type search: str
:param page: Page number starting from 1
:type page: int
:param per_page: Number of items per page
:type per_page: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: RecipeInterfaceList
"""
kwargs['_return_http_data_only'] = True
return self.get_project_recipes_with_http_info(owner, name, **kwargs) # noqa: E501
def get_project_recipes_with_http_info(self, owner, name, **kwargs): # noqa: E501
"""Get project recipes # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_project_recipes_with_http_info(owner, name, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param search: Search string to find recipes
:type search: str
:param page: Page number starting from 1
:type page: int
:param per_page: Number of items per page
:type per_page: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(RecipeInterfaceList, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'owner',
'name',
'search',
'page',
'per_page'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_project_recipes" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `get_project_recipes`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `get_project_recipes`") # noqa: E501
if self.api_client.client_side_validation and 'page' in local_var_params and local_var_params['page'] < 1: # noqa: E501
raise ApiValueError("Invalid value for parameter `page` when calling `get_project_recipes`, must be a value greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'per_page' in local_var_params and local_var_params['per_page'] > 100: # noqa: E501
raise ApiValueError("Invalid value for parameter `per_page` when calling `get_project_recipes`, must be a value less than or equal to `100`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'search' in local_var_params and local_var_params['search'] is not None: # noqa: E501
query_params.append(('search', local_var_params['search'])) # noqa: E501
if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'per_page' in local_var_params and local_var_params['per_page'] is not None: # noqa: E501
query_params.append(('per-page', local_var_params['per_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyAuth', 'JWTAuth'] # noqa: E501
return self.api_client.call_api(
'/projects/{owner}/{name}/recipes', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RecipeInterfaceList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def list_projects(self, **kwargs): # noqa: E501
"""List Projects # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_projects(async_req=True)
>>> result = thread.get()
:param search: Search string to find projects
:type search: str
:param ids: The ID of a project to search for
:type ids: list[str]
:param names: The name of the project
:type names: list[str]
:param owner: Owner of the project
:type owner: list[str]
:param public: Boolean check for public/private projects
:type public: bool
:param permissions: Filter by permission on given resource
:type permissions: list[str]
:param sort_by: Key to sort the list by
:type sort_by: ProjectSortKey
:param sort_order: The order to sort the list
:type sort_order: SortEnum
:param page: Page number starting from 1
:type page: int
:param per_page: Number of items per page
:type per_page: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: ProjectList
"""
kwargs['_return_http_data_only'] = True
return self.list_projects_with_http_info(**kwargs) # noqa: E501
def list_projects_with_http_info(self, **kwargs): # noqa: E501
"""List Projects # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_projects_with_http_info(async_req=True)
>>> result = thread.get()
:param search: Search string to find projects
:type search: str
:param ids: The ID of a project to search for
:type ids: list[str]
:param names: The name of the project
:type names: list[str]
:param owner: Owner of the project
:type owner: list[str]
:param public: Boolean check for public/private projects
:type public: bool
:param permissions: Filter by permission on given resource
:type permissions: list[str]
:param sort_by: Key to sort the list by
:type sort_by: ProjectSortKey
:param sort_order: The order to sort the list
:type sort_order: SortEnum
:param page: Page number starting from 1
:type page: int
:param per_page: Number of items per page
:type per_page: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(ProjectList, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'search',
'ids',
'names',
'owner',
'public',
'permissions',
'sort_by',
'sort_order',
'page',
'per_page'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_projects" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and 'page' in local_var_params and local_var_params['page'] < 1: # noqa: E501
raise ApiValueError("Invalid value for parameter `page` when calling `list_projects`, must be a value greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'per_page' in local_var_params and local_var_params['per_page'] > 100: # noqa: E501
raise ApiValueError("Invalid value for parameter `per_page` when calling `list_projects`, must be a value less than or equal to `100`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'search' in local_var_params and local_var_params['search'] is not None: # noqa: E501
query_params.append(('search', local_var_params['search'])) # noqa: E501
if 'ids' in local_var_params and local_var_params['ids'] is not None: # noqa: E501
query_params.append(('ids', local_var_params['ids'])) # noqa: E501
collection_formats['ids'] = 'multi' # noqa: E501
if 'names' in local_var_params and local_var_params['names'] is not None: # noqa: E501
query_params.append(('names', local_var_params['names'])) # noqa: E501
collection_formats['names'] = 'multi' # noqa: E501
if 'owner' in local_var_params and local_var_params['owner'] is not None: # noqa: E501
query_params.append(('owner', local_var_params['owner'])) # noqa: E501
collection_formats['owner'] = 'multi' # noqa: E501
if 'public' in local_var_params and local_var_params['public'] is not None: # noqa: E501
query_params.append(('public', local_var_params['public'])) # noqa: E501
if 'permissions' in local_var_params and local_var_params['permissions'] is not None: # noqa: E501
query_params.append(('permissions', local_var_params['permissions'])) # noqa: E501
collection_formats['permissions'] = 'multi' # noqa: E501
if 'sort_by' in local_var_params and local_var_params['sort_by'] is not None: # noqa: E501
query_params.append(('sort_by', local_var_params['sort_by'])) # noqa: E501
if 'sort_order' in local_var_params and local_var_params['sort_order'] is not None: # noqa: E501
query_params.append(('sort_order', local_var_params['sort_order'])) # noqa: E501
if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'per_page' in local_var_params and local_var_params['per_page'] is not None: # noqa: E501
query_params.append(('per-page', local_var_params['per_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyAuth', 'JWTAuth'] # noqa: E501
return self.api_client.call_api(
'/projects', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProjectList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def update(self, owner, name, project_update, **kwargs): # noqa: E501
"""Update a Project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update(owner, name, project_update, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param project_update: (required)
:type project_update: ProjectUpdate
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: UpdateAccepted
"""
kwargs['_return_http_data_only'] = True
return self.update_with_http_info(owner, name, project_update, **kwargs) # noqa: E501
def update_with_http_info(self, owner, name, project_update, **kwargs): # noqa: E501
"""Update a Project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_with_http_info(owner, name, project_update, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param project_update: (required)
:type project_update: ProjectUpdate
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(UpdateAccepted, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'owner',
'name',
'project_update'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `update`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `update`") # noqa: E501
# verify the required parameter 'project_update' is set
if self.api_client.client_side_validation and ('project_update' not in local_var_params or # noqa: E501
local_var_params['project_update'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `project_update` when calling `update`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'project_update' in local_var_params:
body_params = local_var_params['project_update']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyAuth', 'JWTAuth'] # noqa: E501
return self.api_client.call_api(
'/projects/{owner}/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UpdateAccepted', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def upsert_project_permission(self, owner, name, project_access_policy, **kwargs): # noqa: E501
"""Upsert a new permission to a project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upsert_project_permission(owner, name, project_access_policy, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param project_access_policy: (required)
:type project_access_policy: ProjectAccessPolicy
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: UpdateAccepted
"""
kwargs['_return_http_data_only'] = True
return self.upsert_project_permission_with_http_info(owner, name, project_access_policy, **kwargs) # noqa: E501
def upsert_project_permission_with_http_info(self, owner, name, project_access_policy, **kwargs): # noqa: E501
"""Upsert a new permission to a project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upsert_project_permission_with_http_info(owner, name, project_access_policy, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param project_access_policy: (required)
:type project_access_policy: ProjectAccessPolicy
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(UpdateAccepted, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'owner',
'name',
'project_access_policy'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method upsert_project_permission" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `upsert_project_permission`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `upsert_project_permission`") # noqa: E501
# verify the required parameter 'project_access_policy' is set
if self.api_client.client_side_validation and ('project_access_policy' not in local_var_params or # noqa: E501
local_var_params['project_access_policy'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `project_access_policy` when calling `upsert_project_permission`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'project_access_policy' in local_var_params:
body_params = local_var_params['project_access_policy']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyAuth', 'JWTAuth'] # noqa: E501
return self.api_client.call_api(
'/projects/{owner}/{name}/permissions', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UpdateAccepted', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
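# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the generated client).
# It assumes the surrounding class is constructed with an already-configured
# `api_client`, as is usual for OpenAPI-generated clients; the class name,
# owner and project name below are placeholders. A plain call returns the
# deserialized object, while async_req=True returns a thread whose .get()
# yields the same result (see the method docstrings above).
#
# api = ProjectsApi(api_client)
# project = api.get_project("some-owner", "some-project")
# thread = api.list_projects(search="demo", per_page=25, async_req=True)
# projects = thread.get()
# ---------------------------------------------------------------------------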
|
class Constants(object):
LOGGER_CONF = "conf/logger.yml"
USERNAME = "mapr"
PASSWORD = "mapr"
GROUPNAME = "mapr"
USERID = 5000
GROUPID = 5000
MYSQL_USER = "admin"
MYSQL_PASS = "mapr"
LDAPADMIN_USER = "admin"
LDAPADMIN_PASS = "mapr"
LDAPBIND_USER = "readonly"
LDAPBIND_PASS = "mapr"
|
## for data
import pandas as pd
import numpy as np
import requests
import json
import os
from datetime import datetime, date
from dotenv import load_dotenv
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
## for plotting
import matplotlib.pyplot as plt
import matplotlib.patches as pltpatches
## for stationarity test
import statsmodels.api as sm
## for outliers detection, models tuning, clustering
from sklearn import preprocessing, svm, model_selection, metrics, cluster
## for autoregressive models
import pmdarima
import statsmodels.tsa.api as smt
import arch
import tensorflow as tf
## for deep learning
from tensorflow.python.keras import models, layers, preprocessing as kprocessing
## for prophet
from fbprophet import Prophet
pd.plotting.register_matplotlib_converters()
## for parametric fit and resistence/support
from scipy import optimize, stats, signal, cluster as sci_cluster
## for clustering
from tslearn.metrics import dtw
from tslearn.utils import to_time_series_dataset
from tslearn.clustering import TimeSeriesKMeans
###############################################################################
# TS ANALYSIS #
###############################################################################
def get_data_api_toTs(ini,coin):
coin_url = os.getenv(coin.upper()+"_HISTOHOUR")
if ini == 0 :
request = requests.get(coin_url)
else:
request = requests.get(coin_url+f"&toTs={ini}")
todo = json.loads(request.content)
return todo['Data']['Data']
def convertToDF(dfJSON):
return(pd.json_normalize(dfJSON))
'''
Get cryptocurrency dataset.
:parameter
:param coin: coin name (BTC, ETH or XRP)
:param researches: number of API requests; each request returns a block of 2001 hourly observations
'''
def get_data_df(coin,researches):
load_dotenv()
data = get_data_api_toTs(0,coin)
df_aux = convertToDF(data)
for x in range(researches-1):
ini = df_aux['time'][0]
print("Buscando dados de : ",datetime.fromtimestamp(ini))
data1=get_data_api_toTs(ini,coin)
df_aux1 = convertToDF(data1)
df_aux = df_aux1.append(df_aux,ignore_index=True)
return df_aux
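def _example_get_data_df():
    """Hedged usage sketch (illustration only): assumes the .env file defines
    BTC_HISTOHOUR with a valid hourly-history endpoint URL. Each request
    returns a block of hourly observations and the blocks are stitched
    oldest-first by get_data_df."""
    df_hist = get_data_df("BTC", researches=2)
    print(df_hist.shape)
    print(df_hist[["time"]].head())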
'''
Get cryptocurrency dataset.
:parameter
:param coin: coin name (BTC, ETH or XRP)
:param sample_data: get sample data from the API? (True or False)
'''
def get_data(coin, sample_data=True):
if coin.upper() not in ('BTC', 'ETH', 'XRP'):
err_msg = coin + ' is an invalid coin!'
raise ValueError(err_msg)
name_coin = "_SAMPLE_DATA" if sample_data else "_ALL_DATA"
name_coin = coin.upper() + name_coin
print("\nBuscando ", "amostra" if sample_data else "todas",
" observações da moeda", coin.upper())
load_dotenv()
coin_url = os.getenv(name_coin)
request = requests.get(coin_url)
data = json.loads(request.content)
content = data.get("Data")
content = content.get("Data")
print("Dataset foi carregado! Formatando Dataset ...")
df = pd.json_normalize(content[0])
for i in range(1, len(content)):
observation = content[i]
df_temp = pd.json_normalize(observation)
df = df.append(df_temp)
return df
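def _example_get_data():
    """Hedged usage sketch (illustration only): assumes the .env file defines
    BTC_SAMPLE_DATA (the name built above as coin.upper() + "_SAMPLE_DATA")
    pointing at a JSON payload with nested Data.Data records."""
    df_btc = get_data("BTC", sample_data=True)
    print(df_btc.shape)
    print(df_btc.head())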
'''
Plot ts together with its rolling mean and a ~95% interval (rolling mean +/- 1.96 * rolling std).
:parameter
:param ts: pandas Series
:param window: num for rolling stats
:param plot_intervals: bool - if True plots the conf interval
:param plot_ma: bool - if True plots the moving avg
'''
def plot_ts(ts, plot_ma=True, plot_intervals=True, window=30, figsize=(15,5)):
rolling_mean = ts.rolling(window=window).mean()
rolling_std = ts.rolling(window=window).std()
plt.figure(figsize=figsize)
plt.title(ts.name)
plt.plot(ts[window:], label='ts', color="black")
if plot_ma:
plt.plot(rolling_mean, label='MA'+str(window), color="red")
if plot_intervals:
lower_bound = rolling_mean - (1.96 * rolling_std)
upper_bound = rolling_mean + (1.96 * rolling_std)
plt.fill_between(x=ts.index, y1=lower_bound, y2=upper_bound, color='lightskyblue', alpha=0.4)
plt.legend(loc='best')
plt.grid(True)
plt.show()
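def _example_plot_ts():
    """Hedged usage sketch (illustration only): assumes the API payload
    includes 'time' (unix seconds) and 'close' columns. Plots the closing
    price with a 30-step rolling mean and a ~95% band (mean +/- 1.96 * std)."""
    df = get_data("BTC", sample_data=True)
    ts = pd.Series(df["close"].values,
                   index=pd.to_datetime(df["time"], unit="s"),
                   name="BTC close (US$)")
    plot_ts(ts, plot_ma=True, plot_intervals=True, window=30)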
'''
Fit a parametric trend line.
:parameter
:param ts: pandas Series
:param degree: polynomial order, ex. if 1 --> trend line = constant + slope*x, if 2 --> trend line = constant + a*x + b*x^2
'''
def fit_trend(ts, degree=1, plot=True, figsize=(15,5)):
## fit trend
dtf = ts.to_frame(name="ts")
params = np.polyfit(ts.reset_index().index, ts.values, deg=degree)
constant = params[-1]
dtf["trend"] = constant
X = np.array(range(1,len(ts)+1))
for i in range(1,degree+1):
dtf["trend"] = dtf["trend"] + params[i-1]*(X**i)
## plot
if plot is True:
ax = dtf.plot(grid=True, title="Fitting Trend", figsize=figsize, color=["black","red"])
ax.set(xlabel=None)
plt.show()
return dtf, params
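def _example_fit_trend():
    """Hedged usage sketch (illustration only, toy data): fit_trend wraps
    np.polyfit, so params comes back highest-degree-first; for degree=1,
    params[0] is the slope and params[-1] the intercept."""
    rng = np.random.RandomState(0)
    ts = pd.Series(0.5 * np.arange(200) + rng.normal(0, 3, 200), name="toy ts")
    dtf, params = fit_trend(ts, degree=1, plot=False)
    print("slope ~ %.3f | intercept ~ %.3f" % (params[0], params[-1]))
    print(dtf.tail())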
'''
Fit a parametric polynomial trend on train+test, plot it and print error metrics.
:parameter
:param ts_train: pandas Series - training portion of the series
:param ts_test: pandas Series - test portion of the series
:param degree: polynomial order, ex. if 2 --> trend line = constant + a*x + b*x^2 ...
'''
def fit_poly(ts_train, ts_test, degree=2, plot=True, figsize=(6,6)):
ts = ts_train.append(ts_test)
x = ts.reset_index().index
y = ts.values
params = np.polyfit(x, y,degree)
poly1d_fn = np.poly1d(params)
y_pred = poly1d_fn(x)
ts_plot = ts.reset_index()
poly = pd.DataFrame({'forecast': y_pred, 'x': ts.reset_index()['date'], 'ts': ts_plot['sales']})
## plot
if plot is True:
plt.figure(figsize=figsize)
es_ts = poly[["x","ts"]]
es_fc = poly[["x","forecast"]]
print(es_fc)
plt.plot(es_ts['x'], es_ts['ts'],color="black", label = "Historical")
plt.plot(es_fc['x'], es_fc['forecast'],color="green", label = "Training")
plt.xlabel("Date")
plt.xticks(rotation=45)
plt.ylabel("US$")
plt.grid(True)
plt.legend()
if degree > 1 :
plt.savefig('regressao_polinomial_train.png', format='png', bbox_inches='tight')
else:
plt.savefig('regressao_linear_train.png', format='png', bbox_inches='tight')
plt.show()
print('Figure saved!')
plt.figure(figsize=figsize)
first_idx = poly[pd.notnull(poly["forecast"])].index[0]
first_loc = poly.index.tolist().index(first_idx)
zoom_idx = poly.index[first_loc-len(ts_test)]
es_ts = poly.loc[zoom_idx:][["x","ts"]]
es_fc = poly.loc[zoom_idx:][["x","forecast"]]
plt.plot(es_ts['x'], es_ts['ts'],color="black", label = "Historical")
plt.plot(es_fc['x'], es_fc['forecast'],color="green", label = "Test")
plt.xlabel("Date")
plt.xticks(rotation=45)
plt.ylabel("US$")
plt.grid(True)
plt.legend()
if degree > 1 :
plt.savefig('regressao_polinomial_test.png', format='png', bbox_inches='tight')
else:
plt.savefig('regressao_linear_test.png', format='png', bbox_inches='tight')
plt.show()
print('Figure saved!')
d = y - y_pred
mape = np.mean(np.abs(d / y)) * 100
mse = np.mean(d**2)
mae = np.mean(abs(d))
rmse = np.sqrt(mse)
print("Results by manual calculation: Treinamento")
print("MAPE:%.4f" %mape,"%")
print("MAE:%.4f" %mae)
print("MSE:%.4f" %mse)
print("RMSE:%.4f" %rmse)
es_ts = poly.loc[zoom_idx:][["x","ts"]]
es_fc = poly.loc[zoom_idx:][["x","forecast"]]
poly["error"] = es_ts["ts"] - es_fc["forecast"]
poly["error_pct"] = poly["error"] / es_ts["ts"]
### kpi
error_mean = poly["error"].mean()
error_std = poly["error"].std()
mae = poly["error"].apply(lambda x: np.abs(x)).mean() #mean absolute error
mape = poly["error_pct"].apply(lambda x: np.abs(x)).mean() *100 #mean absolute error %
mse = poly["error"].apply(lambda x: x**2).mean() #mean squared error
rmse = np.sqrt(mse) #root mean squared error
print("Results by manual calculation Teste:")
print("MAPE:%.4f" %mape,"%")
print("MAE:%.4f" %mae)
print("MSE:%.4f" %mse)
print("RMSE:%.4f" %rmse)
'''
Difference the ts (lag/order differencing).
:parameter
:param ts: pandas Series
:param lag: num - diff[t] = y[t] - y[t-lag]
:param order: num - how many times to difference: diff[t]^order = diff[t] - diff[t-lag]
:param drop_na: logic - if True NAs are dropped, else they are backfilled with the next observation
'''
def diff_ts(ts, lag=1, order=1, drop_na=True):
for i in range(order):
ts = ts - ts.shift(lag)
ts = ts[(pd.notnull(ts))] if drop_na is True else ts.fillna(method="bfill")
return ts
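def _example_diff_ts():
    """Hedged usage sketch (illustration only): first-order differencing,
    diff[t] = y[t] - y[t-1], the usual step before stationarity checks."""
    ts = pd.Series([10.0, 12.0, 15.0, 14.0, 18.0])
    print(diff_ts(ts, lag=1, order=1, drop_na=True).tolist())  # [2.0, 3.0, -1.0, 4.0]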
'''
Find outliers using sklearn's unsupervised one-class support vector machine.
:parameter
:param ts: pandas Series
:param perc: float - percentage of outliers to look for
:return
dtf with raw ts, outlier 1/0 (yes/no), numeric index
'''
def find_outliers(ts, perc=0.01, figsize=(6,6)):
## fit svm
scaler = preprocessing.StandardScaler()
ts_scaled = scaler.fit_transform(ts.values.reshape(-1,1))
model = svm.OneClassSVM(nu=perc, kernel="rbf", gamma=0.01)
model.fit(ts_scaled)
## dtf output
dtf_outliers = ts.to_frame(name="ts")
dtf_outliers["outlier"] = model.predict(ts_scaled)
dtf_outliers["outlier"] = dtf_outliers["outlier"].apply(lambda x: 1 if x == -1 else 0)
## plot
fig, ax = plt.subplots(figsize=figsize)
ax.set(title="Outliers detection: found "+str(sum(dtf_outliers["outlier"] == 1)))
ax.plot(dtf_outliers.index, dtf_outliers["ts"], color="black")
ax.scatter(x=dtf_outliers[dtf_outliers["outlier"]==1].index, y=dtf_outliers[dtf_outliers["outlier"]==1]['ts'], color='red')
ax.grid(True)
plt.show()
return dtf_outliers
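# Example (illustrative, `ts` is assumed to be a pandas Series with a datetime index as in
# the rest of this module): flag ~1% of points as outliers, then interpolate over them with
# remove_outliers defined below.
#
#   dtf_out = find_outliers(ts, perc=0.01)
#   outliers_index = dtf_out[dtf_out["outlier"] == 1].index
#   ts_clean = remove_outliers(ts, outliers_idx=outliers_index)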
'''
Interpolate outliers in a ts.
'''
def remove_outliers(ts, outliers_idx, figsize=(6,6)):
ts_clean = ts.copy()
ts_clean.loc[outliers_idx] = np.nan
ts_clean = ts_clean.interpolate(method="linear")
ax = ts.plot(figsize=figsize, color="red", alpha=0.5, label="Histórico", legend=True)
ts_clean.plot(ax=ax, grid=True, color="black", label="Interpolado", legend=True)
ax.set(xlabel=None)
plt.xlabel("Data")
plt.ylabel("US$")
plt.legend()
plt.savefig('remocao_outliers.png', format='png', bbox_inches='tight')
plt.show()
return ts_clean
'''
Finds maxima, minima, resistance and support levels.
:parameter
:param ts: pandas Series
:param window: int - rolling window
:param trend: bool - False if ts is flat
:return
dtf with raw ts, max, min, resistence, support
'''
def resistence_support(ts, window=30, trend=False, plot=True, figsize=(15,5)):
dtf = ts.to_frame(name="ts")
dtf["max"], dtf["min"] = [np.nan, np.nan]
rolling = dtf['ts'].rolling(window=window).mean().dropna()
## maxs
local_max = signal.argrelextrema(rolling.values, np.greater)[0]
local_max_idx = [dtf.iloc[i-window:i+window]['ts'].idxmax() for i in local_max if (i > window) and (i < len(dtf)-window)]
dtf["max"].loc[local_max_idx] = dtf["ts"].loc[local_max_idx]
## mins
local_min = signal.argrelextrema(rolling.values, np.less)[0]
local_min_idx = [dtf.iloc[i-window:i+window]['ts'].idxmin() for i in local_min if (i > window) and (i < len(dtf)-window)]
dtf["min"].loc[local_min_idx] = dtf["ts"].loc[local_min_idx]
## resistence/support
dtf["resistence"] = dtf["max"].interpolate(method="linear") if trend is True else dtf["max"].fillna(method="ffill")
dtf["support"] = dtf["min"].interpolate(method="linear") if trend is True else dtf["min"].fillna(method="ffill")
## plot
if plot is True:
ax = dtf["ts"].plot(color="black", figsize=figsize, grid=True)
dtf["resistence"].plot(ax=ax, color="darkviolet", label="resistence", grid=True, linestyle="--")
dtf["support"].plot(ax=ax, color="green", label="support", grid=True, linestyle="--")
ax.scatter(x=dtf["max"].index, y=dtf["max"].values, color="darkviolet", label="max")
ax.scatter(x=dtf["min"].index, y=dtf["min"].values, color="green", label="min")
ax.set(xlabel=None)
ax.legend()
plt.show()
return dtf
###############################################################################
# MODEL DESIGN & TESTING - FORECASTING #
###############################################################################
'''
Split train/test from any given data point.
:parameter
:param ts: pandas Series
:param exog: array len(ts) x n regressors
:param test: num or str - test size (ex. 0.20) or index position (ex. "yyyy-mm-dd", 1000)
:return
ts_train, ts_test, exog_train, exog_test
'''
def split_train_test(ts, exog=None, test=0.20, plot=True, figsize=(6,6)):
## define splitting point
if type(test) is float:
split = int(len(ts)*(1-test))
perc = test
elif type(test) is str:
split = ts.reset_index()[ts.reset_index().iloc[:,0]==test].index[0]
perc = round(len(ts[split:])/len(ts), 2)
else:
split = test
perc = round(len(ts[split:])/len(ts), 2)
print("--- splitting at index: ", split, "|", ts.index[split], "| test size:", perc, " ---")
## split ts
ts_train = ts.head(split)
ts_test = ts.tail(len(ts)-split)
upper_bound = max(ts) * 1.05
lower_bound = min(ts) * 1.05
if plot is True:
ts_train.plot(grid=True, title="", color="black")
plt.xlabel('Data')
plt.ylabel('US$')
plt.savefig('dados_treino.png', format='png', bbox_inches='tight')
plt.show()
ts_test.plot(grid=True, title="", color="black")
plt.xlabel('Data')
plt.ylabel('US$')
plt.savefig('dados_teste.png', format='png', bbox_inches='tight')
plt.show()
## split exog
if exog is not None:
exog_train = exog[0:split]
exog_test = exog[split:]
return ts_train, ts_test, exog_train, exog_test
else:
return ts_train, ts_test
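# Example (illustrative): the split point can be a fraction of the series, a date string that
# appears in the index, or an integer position.
#
#   ts_train, ts_test = split_train_test(ts, test=0.20, plot=False)          # last 20% as test
#   ts_train, ts_test = split_train_test(ts, test="2019-01-01", plot=False)  # split at a date
#   ts_train, ts_test = split_train_test(ts, test=1000, plot=False)          # split at position 1000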
'''
Compute the confidence interval for predictions:
[y[t+h] +- (c*σ*√h)]
:parameter
:param lst_values: list or array
:param error_std: σ (standard dev of residuals)
:param conf: num - confidence level (90%, 95%, 99%)
:return
array with 2 columns (upper and lower bounds)
'''
def utils_conf_int(lst_values, error_std, conf=0.95):
lst_values = list(lst_values) if type(lst_values) != list else lst_values
c = round( stats.norm.ppf(1-(1-conf)/2), 2)
lst_ci = []
for x in lst_values:
lst_x = lst_values[:lst_values.index(x)+1]
h = len(lst_x)
ci = [x - (c*error_std*np.sqrt(h)), x + (c*error_std*np.sqrt(h))]
lst_ci.append(ci)
return np.array(lst_ci)
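# Worked example (illustrative): with conf=0.95 the multiplier is c = 1.96 and the band widens
# with the horizon h as c*error_std*sqrt(h). Note that h is inferred from the list position via
# .index(), so the forecast values are assumed to be distinct.
#
#   utils_conf_int([100, 102, 104], error_std=2.0, conf=0.95)
#   # h=1 -> [96.08, 103.92], h=2 -> [96.46, 107.54], h=3 -> [97.21, 110.79]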
'''
Evaluation metrics for predictions.
:parameter
:param dtf: DataFrame with columns "ts", "model", "forecast", and "lower"/"upper" (if available)
:return
dtf with columns "ts", "model", "residuals", "lower", "forecast", "upper", "error"
'''
def utils_evaluate_ts_model(dtf, conf=0.95, title=None, plot=True, figsize=(20,13)):
try:
## residuals from fitting
### add column
dtf["residuals"] = dtf["ts"] - dtf["model"]
### kpi
residuals_mean = dtf["residuals"].mean()
residuals_std = dtf["residuals"].std()
## Model error
### add column
dtf["model_error_pct"] = dtf["residuals"] / dtf["ts"]
### kpi
model_error_mean = dtf["residuals"].mean()
model_error_std = dtf["residuals"].std()
model_mae = dtf["residuals"].apply(lambda x: np.abs(x)).mean() #mean absolute error
model_mape = dtf["model_error_pct"].apply(lambda x: np.abs(x)).mean() #mean absolute error %
model_mse = dtf["residuals"].apply(lambda x: x**2).mean() #mean squared error
model_rmse = np.sqrt(model_mse) #root mean squared error
## forecasting error
### add column
dtf["error"] = dtf["ts"] - dtf["forecast"]
dtf["error_pct"] = dtf["error"] / dtf["ts"]
### kpi
error_mean = dtf["error"].mean()
error_std = dtf["error"].std()
mae = dtf["error"].apply(lambda x: np.abs(x)).mean() #mean absolute error
mape = dtf["error_pct"].apply(lambda x: np.abs(x)).mean() #mean absolute error %
mse = dtf["error"].apply(lambda x: x**2).mean() #mean squared error
rmse = np.sqrt(mse) #root mean squared error
## interval
if "upper" not in dtf.columns:
print("--- computing confidence interval ---")
dtf["lower"], dtf["upper"] = [np.nan, np.nan]
dtf.loc[dtf["forecast"].notnull(), ["lower","upper"]] = utils_conf_int(
dtf[dtf["forecast"].notnull()]["forecast"], residuals_std, conf)
## plot
if plot is True:
plt.figure(figsize=figsize)
### training
ts = dtf[pd.notnull(dtf["model"])][["ts"]]
print(ts.reset_index().head())
model = dtf[pd.notnull(dtf["model"])][["model"]]
print(model.reset_index().head())
plt.plot(ts, color='black', label='Histórico')
plt.plot(model, color='green', label='Treinamento')
plt.xlabel("Data")
plt.xticks(rotation=45)
plt.ylabel("US$")
plt.grid(True)
plt.legend()
plt.savefig(title+'treinamento.png', format='png', bbox_inches='tight')
plt.show()
print('\nFigura Salva!\n')
### testing
plt.figure(figsize=figsize)
ts = dtf[pd.isnull(dtf["model"])][["ts"]]
forecast = dtf[pd.isnull(dtf["model"])][["forecast"]]
plt.plot(ts, color='black', label='Histórico')
plt.plot(forecast, color='green', label='Teste')
plt.xlabel("Data")
plt.fill_between(x=dtf.index, y1=dtf['lower'], y2=dtf['upper'], color='b', alpha=0.2)
plt.xticks(rotation=45)
plt.ylabel("US$")
plt.grid(True)
plt.legend()
plt.savefig(title+'teste.png', format='png', bbox_inches='tight')
plt.show()
print('\nFigura Salva!\n')
print("Training --> Residuals mean:", np.round(residuals_mean), " | std:", np.round(residuals_std),
" | mae:",np.round(model_mae), " | mape:",np.round(model_mape*100), "% | mse:",np.round(model_mse), " | rmse:",np.round(model_rmse))
print("Test --> Error mean:", np.round(error_mean), " | std:", np.round(error_std),
" | mae:",np.round(mae), " | mape:",np.round(mape*100), "% | mse:",np.round(mse), " | rmse:",np.round(rmse))
return dtf[["ts", "model", "residuals", "lower", "forecast", "upper", "error"]]
except Exception as e:
print("--- got error ---")
print(e)
'''
Generate dates to index predictions.
:parameter
:param start: str - "yyyy-mm-dd"
:param end: str - "yyyy-mm-dd"
:param n: num - length of index
:param freq: None or str - 'B' business day, 'D' daily, 'W' weekly, 'M' monthly, 'A' annual, 'Q' quarterly
'''
def utils_generate_indexdate(start, end=None, n=None, freq="D"):
if end is not None:
index = pd.date_range(start=start, end=end, freq=freq)
else:
index = pd.date_range(start=start, periods=n, freq=freq)
index = index[1:]
print("--- generating index date --> start:", index[0], "| end:", index[-1], "| len:", len(index), "---")
return index
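# Example (illustrative): generate 30 daily timestamps starting the day after the last
# observation (the first generated date is dropped by the index[1:] line above).
#
#   idx = utils_generate_indexdate(start="2020-12-31", n=31, freq="D")
#   # -> DatetimeIndex from 2021-01-01 to 2021-01-30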
'''
Plot unknown future forecast and produce conf_int with residual_std and pred_int if an error_std is given.
:parameter
:param dtf: DataFrame with columns "ts", "model", "forecast", and "lower"/"upper" (if available)
:param conf: num - confidence level (90%, 95%, 99%)
:param zoom: int - plots the focus on the last zoom days
:return
dtf with columns "ts", "model", "residuals", "lower", "forecast", "upper" (No error)
'''
def utils_add_forecast_int(dtf, conf=0.95, plot=True, zoom=30, figsize=(6,6), title=None):
## residuals from fitting
### add column
dtf["residuals"] = dtf["ts"] - dtf["model"]
### kpi
residuals_std = dtf["residuals"].std()
## interval
if "upper" not in dtf.columns:
print("--- computing confidence interval ---")
dtf["lower"], dtf["upper"] = [np.nan, np.nan]
dtf.loc[dtf["forecast"].notnull(), ["lower","upper"]] = utils_conf_int(
dtf[dtf["forecast"].notnull()]["forecast"], residuals_std, conf)
## plot
if plot is True:
plt.figure(figsize=figsize)
### entire series
es_ts = dtf[["ts"]]
es_fc = dtf[["forecast"]]
plt.plot(es_ts,color="black", label = "Histórico")
plt.plot(es_fc,color="red", label = "Projeção")
plt.xlabel("Data")
plt.fill_between(x=dtf.index, y1=dtf['lower'], y2=dtf['upper'], color='b', alpha=0.2)
plt.xticks(rotation=45)
plt.ylabel("US$")
plt.grid(True)
plt.legend()
plt.savefig(title+'_entire_series.png', format='png', bbox_inches='tight')
plt.show()
### focus on last
plt.figure(figsize=figsize)
first_idx = dtf[pd.notnull(dtf["forecast"])].index[0]
first_loc = dtf.index.tolist().index(first_idx)
zoom_idx = dtf.index[first_loc-zoom]
es_ts = dtf.loc[zoom_idx:][["ts"]]
es_fc = dtf.loc[zoom_idx:][["forecast"]]
plt.plot(es_ts,color="black", label = "Histórico")
plt.plot(es_fc,color="red", label = "Projeção")
plt.xlabel("Data")
plt.fill_between(x=dtf.loc[zoom_idx:].index, y1=dtf.loc[zoom_idx:]['lower'], y2=dtf.loc[zoom_idx:]['upper'], color='b', alpha=0.2)
plt.xticks(rotation=45)
plt.ylabel("US$")
plt.grid(True)
plt.legend()
plt.savefig(title+'_zoom.png', format='png', bbox_inches='tight')
plt.show()
return dtf[["ts", "model", "residuals", "lower", "forecast", "upper"]]
###############################################################################
# AUTOREGRESSIVE #
###############################################################################
'''
Tune Holt-Winters Exponential Smoothing
:parameter
:param ts_train: pandas timeseries
:param s: num - number of observations per seasonal (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
:param val_size: num - size of validation fold
:param scoring: function(y_true, y_pred)
:param top: num - plot top models only
:return
dtf with results
'''
def tune_expsmooth_model(ts_train, s=7, val_size=0.2, scoring=None, top=None, figsize=(15,5)):
## split
dtf_fit, dtf_val = model_selection.train_test_split(ts_train, test_size=val_size, shuffle=False)
dtf_fit, dtf_val = dtf_fit.to_frame(name="ts"), dtf_val.to_frame(name="ts")
## scoring
scoring = metrics.mean_absolute_error if scoring is None else scoring
    ## hyperparameter space
trend = ['add', 'mul', None]
damped = [True, False]
    seasonal = ['add', 'mul', None]
## grid search
dtf_search = pd.DataFrame(columns=["combo","score","model"])
combinations = []
for t in trend:
for d in damped:
for ss in seasonal:
combo = "trend="+str(t)+", damped="+str(d)+", seas="+str(ss)
if combo not in combinations:
combinations.append(combo)
try:
### fit
model = smt.ExponentialSmoothing(dtf_fit, trend=t, damped=d, seasonal=ss, seasonal_periods=s).fit()
### predict
pred = model.forecast(len(dtf_val))
if pred.isna().sum() == 0:
dtf_val[combo] = pred.values
score = scoring(dtf_val["ts"].values, dtf_val[combo].values)
dtf_search = dtf_search.append(pd.DataFrame({"combo":[combo],"score":[score],"model":[model]}))
except:
continue
## find best
dtf_search = dtf_search.sort_values("score").reset_index(drop=True)
best = dtf_search["combo"].iloc[0]
dtf_val = dtf_val.rename(columns={best:best+" [BEST]"})
dtf_val = dtf_val[["ts",best+" [BEST]"] + list(dtf_search["combo"].unique())[1:]]
## plot
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)
fig.suptitle("Model Tuning", fontsize=15)
combos = dtf_val.drop("ts", axis=1).columns[:top]
if (len(combos) <= 7) or ((top is not None) and (top <= 7)):
colors = ["red","blue","green","violet","sienna","orange","yellow"]
else:
colors = [tuple(np.random.rand(3,)) for i in range(len(combos))]
### main
ts_train.plot(ax=ax[0], grid=True, color="black", legend=True, label="ts")
ax[0].fill_between(x=dtf_fit.index, y1=ts_train.max(), color='grey', alpha=0.2)
dtf_val[combos].plot(grid=True, ax=ax[0], color=colors, legend=True)
ax[0].legend(loc="upper left")
ax[0].set(xlabel=None)
### zoom
dtf_val["ts"].plot(grid=True, ax=ax[1], color="black", legend=False)
for i,col in enumerate(combos):
linewidth = 2 if col == best+" [BEST]" else 1
dtf_val[col].plot(grid=True, ax=ax[1], color=colors[i], legend=False, linewidth=linewidth)
ax[1].set(xlabel=None)
plt.show()
return dtf_search
'''
Fits Exponential Smoothing:
Simple (level) --> trend=None + seasonal=None
y[t+i] = α*y[t] + α(1-α)^1*y[t-1] + α(1-α)^2*y[t-2] + ... = (α)*y[t] + (1-α)*yhat[t]
Holt (level + trend) --> trend=["add","mul"] + seasonal=None
y[t+i] = level_f(α) + i*trend_f(β)
Holt-Winters (level + trend + seasonality) --> trend=["add","mul"] + seasonal=["add","mul"]
y[t+i] = level_f(α) + i*trend_f(β) + seasonality_f(γ)
:parameter
:param ts_train: pandas timeseries
:param ts_test: pandas timeseries
:param trend: str - "additive" (linear), "multiplicative" (non-linear)
:param damped: bool - damp trend
:param seasonal: str - "additive" (ex. +100 every 7 days), "multiplicative" (ex. x10 every 7 days)
:param s: num - number of observations per seasonal (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
:param factors: tuple - (α,β,γ) smoothing factor for the level (ex 0.94), trend, seasonal
:return
    dtf with predictions and the model
'''
def fit_expsmooth(ts_train, ts_test, trend="additive", damped=False, seasonal="multiplicative", s=None, factors=(None,None,None), conf=0.95, figsize=(15,10)):
## checks
check_seasonality = "Seasonal parameters: No Seasonality" if (seasonal is None) & (s is None) else "Seasonal parameters: "+str(seasonal)+" Seasonality every "+str(s)+" observations"
print(check_seasonality)
## train
model = smt.ExponentialSmoothing(ts_train, trend=trend, damped=damped, seasonal=seasonal, seasonal_periods=s).fit(factors[0], factors[1], factors[2])
dtf_train = ts_train.to_frame(name="ts")
dtf_train["model"] = model.fittedvalues
## test
dtf_test = ts_test.to_frame(name="ts")
dtf_test["forecast"] = model.predict(start=len(ts_train), end=len(ts_train)+len(ts_test)-1)
## evaluate
dtf = dtf_train.append(dtf_test)
alpha, beta, gamma = round(model.params["smoothing_level"],2), round(model.params["smoothing_slope"],2), round(model.params["smoothing_seasonal"],2)
dtf = utils_evaluate_ts_model(dtf, conf=conf, figsize=figsize, title="Holt-Winters "+str((alpha, beta, gamma)))
return dtf, model
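# Example call (illustrative, ts_train/ts_test as returned by split_train_test): an
# additive-trend, additive-seasonality Holt-Winters fit with weekly seasonality on daily data.
#
#   dtf_hw, model_hw = fit_expsmooth(ts_train, ts_test, trend="additive", damped=False,
#                                    seasonal="additive", s=7, conf=0.95)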
'''
Tune ARIMA
:parameter
:param ts_train: pandas timeseries
:param s: num - number of observations per seasonal (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
:param val_size: num - size of validation fold
:param max_order: tuple - max (p,d,q) values
:param seasonal_order: tuple - max (P,D,Q) values
:param scoring: function(y_true, y_pred)
:param top: num - plot top models only
:return
dtf with results
'''
def tune_arima_model(ts_train, s=7, val_size=0.2, max_order=(3,1,3), seasonal_order=(1,1,1), scoring=None, top=None, figsize=(15,5)):
## split
dtf_fit, dtf_val = model_selection.train_test_split(ts_train, test_size=val_size, shuffle=False)
dtf_fit, dtf_val = dtf_fit.to_frame(name="ts"), dtf_val.to_frame(name="ts")
## scoring
scoring = metrics.mean_absolute_error if scoring is None else scoring
    ## hyperparameter space
ps = range(0,max_order[0]+1)
ds = range(0,max_order[1]+1)
qs = range(0,max_order[2]+1)
Ps = range(0,seasonal_order[0]+1)
Ds = range(0,seasonal_order[1]+1)
Qs = range(0,seasonal_order[2]+1)
## grid search
dtf_search = pd.DataFrame(columns=["combo","score","model"])
combinations = []
for p in ps:
for d in ds:
for q in qs:
for P in Ps:
for D in Ds:
for Q in Qs:
combo = "("+str(p)+","+str(d)+","+str(q)+")x("+str(P)+","+str(D)+","+str(Q)+")"
if combo not in combinations:
combinations.append(combo)
try:
### fit
model = smt.SARIMAX(ts_train, order=(p,d,q), seasonal_order=(P,D,Q,s)).fit()
### predict
pred = model.forecast(len(dtf_val))
if pred.isna().sum() == 0:
dtf_val[combo] = pred.values
score = scoring(dtf_val["ts"].values, dtf_val[combo].values)
dtf_search = dtf_search.append(pd.DataFrame({"combo":[combo],"score":[score],"model":[model]}))
except:
continue
## find best
dtf_search = dtf_search.sort_values("score").reset_index(drop=True)
best = dtf_search["combo"].iloc[0]
dtf_val = dtf_val.rename(columns={best:best+" [BEST]"})
dtf_val = dtf_val[["ts",best+" [BEST]"] + list(dtf_search["combo"].unique())[1:]]
## plot
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)
fig.suptitle("Model Tuning", fontsize=15)
combos = dtf_val.drop("ts", axis=1).columns[:top]
if (len(combos) <= 7) or ((top is not None) and (top <= 7)):
colors = ["red","blue","green","violet","sienna","orange","yellow"]
else:
colors = [tuple(np.random.rand(3,)) for i in range(len(combos))]
### main
ts_train.plot(ax=ax[0], grid=True, color="black", legend=True, label="ts")
ax[0].fill_between(x=dtf_fit.index, y1=ts_train.max(), color='grey', alpha=0.2)
dtf_val[combos].plot(grid=True, ax=ax[0], color=colors, legend=True)
ax[0].legend(loc="upper left")
ax[0].set(xlabel=None)
### zoom
dtf_val["ts"].plot(grid=True, ax=ax[1], color="black", legend=False)
for i,col in enumerate(combos):
linewidth = 2 if col == best+" [BEST]" else 1
dtf_val[col].plot(grid=True, ax=ax[1], color=colors[i], legend=False, linewidth=linewidth)
ax[1].set(xlabel=None)
plt.show()
return dtf_search
'''
Find best Seasonal-ARIMAX parameters.
:parameter
:param ts: pandas timeseries
:param exog: pandas dataframe or numpy array
:param s: num - number of observations per seasonal (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
:return
    summary of the best model
'''
def find_best_sarimax(ts, seasonal=True, stationary=False, s=1, exog=None,
max_p=10, max_d=3, max_q=10,
max_P=10, max_D=3, max_Q=10):
best_model = pmdarima.auto_arima(ts, exogenous=exog,
seasonal=seasonal, stationary=stationary, m=s,
information_criterion='aic', max_order=20,
max_p=max_p, max_d=max_d, max_q=max_q,
max_P=max_P, max_D=max_D, max_Q=max_Q,
error_action='ignore')
print("best model --> (p, d, q):", best_model.order, " and (P, D, Q, s):", best_model.seasonal_order)
return best_model.summary()
'''
Fits SARIMAX (Seasonal ARIMA with External Regressors) (p,d,q)x(P,D,Q,s):
y[t+1] = (c + a0*y[t] + a1*y[t-1] +...+ ap*y[t-p]) + (e[t] + b1*e[t-1] + b2*e[t-2] +...+ bq*e[t-q]) + (B*X[t])
:parameter
:param ts_train: pandas timeseries
:param ts_test: pandas timeseries
:param order: tuple - (p,d,q) --> p: lag order (AR), d: degree of differencing (to remove trend), q: order of moving average (MA)
:param seasonal_order: tuple - (P,D,Q) --> seasonal lag orders (ex. lag from the last 2 seasons)
:param s: num - number of observations per seasonal (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
:param exog_train: pandas dataframe or numpy array
:param exog_test: pandas dataframe or numpy array
:return
    dtf with predictions and the model
'''
def fit_sarimax(ts_train, ts_test, order=(1,0,1), seasonal_order=(1,0,1), s=7, exog_train=None, exog_test=None, conf=0.95, figsize=(15,10)):
## checks
check_trend = "Trend parameters: No differencing" if order[1] == 0 else "Trend parameters: d="+str(order[1])
print(check_trend)
check_seasonality = "Seasonal parameters: No Seasonality" if (s == 0) & (np.sum(seasonal_order[0:2]) == 0) else "Seasonal parameters: Seasonality every "+str(s)+" observations"
print(check_seasonality)
check_exog = "Exog parameters: Not given" if (exog_train is None) & (exog_test is None) else "Exog parameters: number of regressors="+str(exog_train.shape[1])
print(check_exog)
## train
model = smt.SARIMAX(ts_train, order=order, seasonal_order=seasonal_order+(s,), exog=exog_train, enforce_stationarity=False, enforce_invertibility=False).fit()
dtf_train = ts_train.to_frame(name="ts")
dtf_train["model"] = model.fittedvalues
## test
dtf_test = ts_test.to_frame(name="ts")
dtf_test["forecast"] = model.predict(start=len(ts_train), end=len(ts_train)+len(ts_test)-1, exog=exog_test)
## add conf_int
ci = model.get_forecast(len(ts_test)).conf_int(1-conf).values
dtf_test["lower"], dtf_test["upper"] = ci[:,0], ci[:,1]
## evaluate
dtf = dtf_train.append(dtf_test)
title = "ARIMA "+str(order) if exog_train is None else "ARIMAX "+str(order)
title = "S"+title+" x "+str(seasonal_order) if np.sum(seasonal_order) > 0 else title
dtf = utils_evaluate_ts_model(dtf, conf=conf, figsize=figsize, title=title)
return dtf, model
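# Example call (illustrative, same assumptions as above): a SARIMA(1,1,1)x(1,0,1,7) fit with
# no exogenous regressors; pass exog_train/exog_test with matching lengths for a SARIMAX fit.
#
#   dtf_sarima, model_sarima = fit_sarimax(ts_train, ts_test, order=(1,1,1),
#                                          seasonal_order=(1,0,1), s=7, conf=0.95)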
'''
Forecast unknown future with sarimax or expsmooth.
:parameter
:param ts: pandas series
:param model: model object
:param pred_ahead: number of observations to forecast (ex. pred_ahead=30)
:param end: string - date to forecast (ex. end="2016-12-31")
:param freq: None or str - 'B' business day, 'D' daily, 'W' weekly, 'M' monthly, 'A' annual, 'Q' quarterly
:param zoom: for plotting
'''
def forecast_autoregressive(ts, model=None, pred_ahead=None, end=None, freq="D", conf=0.95, zoom=30, figsize=(6,6)):
## model
model = smt.SARIMAX(ts, order=(1,1,1), seasonal_order=(0,0,0,0)).fit() if model is None else model
## fit
dtf = ts.to_frame(name="ts")
dtf["model"] = model.fittedvalues
dtf["residuals"] = dtf["ts"] - dtf["model"]
## index
index = utils_generate_indexdate(start=ts.index[-1], end=end, n=pred_ahead, freq=freq)
## forecast
if "holtwinters" in str(model):
preds = model.forecast(len(index))
dtf_preds = preds.to_frame(name="forecast")
else:
preds = model.get_forecast(len(index))
dtf_preds = preds.predicted_mean.to_frame(name="forecast")
ci = preds.conf_int(1-conf).values
dtf_preds["lower"], dtf_preds["upper"] = ci[:,0], ci[:,1]
#dtf_preds.index, dtf_preds.index.freq = index, 'D'
#print(dtf_preds)
## add intervals and plot
dtf = dtf.append(dtf_preds)
dtf = utils_add_forecast_int(dtf, conf=conf, zoom=zoom, title="SARIMAX", figsize=figsize)
return dtf
###############################################################################
# RNN #
###############################################################################
'''
Plot loss and metrics of keras training.
'''
def utils_plot_keras_training(training):
metrics = [k for k in training.history.keys() if ("loss" not in k) and ("val" not in k)]
fig, ax = plt.subplots(nrows=1, ncols=2, sharey=True, figsize=(15,3))
## training
ax[0].set(title="Training")
ax11 = ax[0].twinx()
ax[0].plot(training.history['loss'], color='black')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('Loss', color='black')
for metric in metrics:
ax11.plot(training.history[metric], label=metric)
ax11.set_ylabel("Score", color='steelblue')
ax11.legend()
## validation
ax[1].set(title="Validation")
ax22 = ax[1].twinx()
ax[1].plot(training.history['val_loss'], color='black')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('Loss', color='black')
for metric in metrics:
ax22.plot(training.history['val_'+metric], label=metric)
ax22.set_ylabel("Score", color="steelblue")
plt.show()
'''
Preprocess a ts for LSTM partitioning into X and y.
:parameter
:param ts: pandas timeseries
:param s: num - number of observations per seasonal (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
:param scaler: sklearn scaler object - if None is fitted
:param exog: pandas dataframe or numpy array
:return
X with shape: (len(ts)-s, s, features)
y with shape: (len(ts)-s,)
the fitted scaler
'''
def utils_preprocess_lstm(ts, s, scaler=None, exog=None):
## scale
if scaler is None:
scaler = preprocessing.MinMaxScaler(feature_range=(0,1))
ts_preprocessed = scaler.fit_transform(ts.values.reshape(-1,1)).reshape(-1)
## create X (N,s,x) and y (N,)
ts_preprocessed = kprocessing.sequence.TimeseriesGenerator(data=ts_preprocessed,
targets=ts_preprocessed,
length=s, batch_size=1)
lst_X, lst_y = [], []
for i in range(len(ts_preprocessed)):
xi, yi = ts_preprocessed[i]
lst_X.append(xi[0])
lst_y.append(yi[0])
X = np.expand_dims(np.array(lst_X), axis=2)
y = np.array(lst_y)
return X, y, scaler
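# Shape check (illustrative): with s=20 lagged observations per sample, a series of length N
# yields N-20 supervised pairs plus the fitted scaler needed later to invert the predictions.
#
#   X, y, scaler = utils_preprocess_lstm(ts_train, s=20)
#   # X.shape == (len(ts_train)-20, 20, 1); y.shape == (len(ts_train)-20,)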
'''
Get fitted values from LSTM.
'''
def utils_fitted_lstm(ts, model, scaler, exog=None):
## scale
s = model.input_shape[1]
ts_preprocessed = scaler.transform(ts.values.reshape(-1,1)).reshape(-1)
## create Xy, predict = fitted
lst_fitted = [np.nan]*s
for i in range(len(ts_preprocessed)):
end_ix = i + s
if end_ix > len(ts_preprocessed)-1:
break
X = ts_preprocessed[i:end_ix]
X = np.array(X)
X = np.reshape(X, (1,s,1))
fit = model.predict(X)
fit = scaler.inverse_transform(fit)[0][0]
lst_fitted.append(fit)
return np.array(lst_fitted)
'''
Predict ts with LSTM using previous predictions.
'''
def utils_predict_lstm(last_s_obs, model, scaler, pred_ahead, exog=None):
## scale
s = model.input_shape[1]
ts_preprocessed = list(scaler.transform(last_s_obs.values.reshape(-1,1)))
## predict, append, re-predict
lst_preds = []
for i in range(pred_ahead):
X = np.array(ts_preprocessed[len(ts_preprocessed)-s:])
X = np.reshape(X, (1,s,1))
pred = model.predict(X)
ts_preprocessed.append(pred[0])
pred = scaler.inverse_transform(pred)[0][0]
lst_preds.append(pred)
return np.array(lst_preds)
'''
Fit Long Short-Term Memory neural network.
:parameter
:param ts: pandas timeseries
:param exog: pandas dataframe or numpy array
:param s: num - number of observations per seasonal (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
:return
    dtf with predictions and the model
'''
def fit_lstm(ts_train, ts_test, model, exog=None, s=20, epochs=100, conf=0.95, figsize=(15,5)):
## check
print("Seasonality: using the last", s, "observations to predict the next 1")
## preprocess train
X_train, y_train, scaler = utils_preprocess_lstm(ts_train, scaler=None, exog=exog, s=s)
print("--- X:", X_train.shape, "| y:", y_train.shape, "---")
## lstm
if model is None:
model = models.Sequential()
model.add( layers.LSTM(input_shape=X_train.shape[1:], units=50, activation='relu', return_sequences=False) )
model.add( layers.Dense(1) )
model.compile(optimizer='adam', loss='mean_absolute_error')
print(model.summary())
## train
verbose = 0 if epochs > 1 else 1
training = model.fit(x=X_train, y=y_train, batch_size=1, epochs=epochs, shuffle=True, verbose=verbose, validation_split=0.3)
dtf_train = ts_train.to_frame(name="ts")
dtf_train["model"] = utils_fitted_lstm(ts_train, training.model, scaler, exog)
dtf_train["model"] = dtf_train["model"].fillna(method='bfill')
## test
last_s_obs = ts_train[-s:]
preds = utils_predict_lstm(last_s_obs, training.model, scaler, pred_ahead=len(ts_test), exog=None)
dtf_test = ts_test.to_frame(name="ts").merge(pd.DataFrame(data=preds, index=ts_test.index, columns=["forecast"]),
how='left', left_index=True, right_index=True)
## evaluate
dtf = dtf_train.append(dtf_test)
dtf = utils_evaluate_ts_model(dtf, conf=conf, figsize=figsize, title="LSTM (memory:"+str(s)+")")
return dtf, training.model
'''
Forecast unknown future.
:parameter
:param ts: pandas series
:param model: model object
:param pred_ahead: number of observations to forecast (ex. pred_ahead=30)
:param end: string - date to forecast (ex. end="2016-12-31")
:param freq: None or str - 'B' business day, 'D' daily, 'W' weekly, 'M' monthly, 'A' annual, 'Q' quarterly
:param zoom: for plotting
'''
def forecast_lstm(ts, model=None, epochs=100, pred_ahead=None, end=None, freq="D", conf=0.95, zoom=30, figsize=(6,6)):
## model
if model is None:
model = models.Sequential([
layers.LSTM(input_shape=(1,1), units=50, activation='relu', return_sequences=False),
layers.Dense(1) ])
model.compile(optimizer='adam', loss='mean_absolute_error')
## fit
s = model.input_shape[1]
X, y, scaler = utils_preprocess_lstm(ts, scaler=None, exog=None, s=s)
training = model.fit(x=X, y=y, batch_size=1, epochs=epochs, shuffle=True, verbose=0, validation_split=0.3)
dtf = ts.to_frame(name="ts")
dtf["model"] = utils_fitted_lstm(ts, training.model, scaler, None)
dtf["model"] = dtf["model"].fillna(method='bfill')
## index
index = utils_generate_indexdate(start=ts.index[-1], end=end, n=pred_ahead, freq=freq)
## forecast
last_s_obs = ts[-s:]
preds = utils_predict_lstm(last_s_obs, training.model, scaler, pred_ahead=len(index), exog=None)
dtf = dtf.append(pd.DataFrame(data=preds, index=index, columns=["forecast"]))
print(pd.DataFrame(data=preds, index=index, columns=["forecast"]))
## add intervals and plot
dtf = utils_add_forecast_int(dtf, conf=conf, zoom=zoom, title="LSTM", figsize=figsize)
return dtf
###############################################################################
# PROPHET #
###############################################################################
'''
Fits prophet on Business Data:
y = trend + seasonality + holidays
:parameter
:param dtf_train: pandas Dataframe with columns 'ds' (dates), 'y' (values), 'cap' (capacity if growth="logistic"), other additional regressor
:param dtf_test: pandas Dataframe with columns 'ds' (dates), 'y' (values), 'cap' (capacity if growth="logistic"), other additional regressor
:param lst_exog: list - names of variables
:param freq: str - "D" daily, "M" monthly, "Y" annual, "MS" monthly start ...
:return
    dtf with predictions and the model
'''
def fit_prophet(dtf_train, dtf_test, lst_exog=None, model=None, freq="D", conf=0.95, figsize=(15,10)):
## setup prophet
if model is None:
model = Prophet(growth="linear", changepoints=None, n_changepoints=25, seasonality_mode="multiplicative",
yearly_seasonality="auto", weekly_seasonality="auto", daily_seasonality="auto",
holidays=None, interval_width=conf)
if lst_exog != None:
for regressor in lst_exog:
model.add_regressor(regressor)
## train
model.fit(dtf_train)
## test
dtf_prophet = model.make_future_dataframe(periods=len(dtf_test), freq=freq, include_history=True)
if model.growth == "logistic":
dtf_prophet["cap"] = dtf_train["cap"].unique()[0]
if lst_exog != None:
dtf_prophet = dtf_prophet.merge(dtf_train[["ds"]+lst_exog], how="left")
dtf_prophet.iloc[-len(dtf_test):][lst_exog] = dtf_test[lst_exog].values
dtf_prophet = model.predict(dtf_prophet)
dtf_train = dtf_train.merge(dtf_prophet[["ds","yhat"]], how="left").rename(
columns={'yhat':'model', 'y':'ts'}).set_index("ds")
dtf_test = dtf_test.merge(dtf_prophet[["ds","yhat","yhat_lower","yhat_upper"]], how="left").rename(
columns={'yhat':'forecast', 'y':'ts', 'yhat_lower':'lower', 'yhat_upper':'upper'}).set_index("ds")
## evaluate
dtf = dtf_train.append(dtf_test)
dtf = utils_evaluate_ts_model(dtf, conf=conf, figsize=figsize, title="Prophet")
return dtf, model
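# Example (illustrative): Prophet expects DataFrames with a 'ds' date column and a 'y' value
# column, so a Series such as ts_train/ts_test would first be reshaped along these lines
# (column names below are illustrative):
#
#   dtf_train_p = ts_train.reset_index().rename(columns={"date": "ds", "sales": "y"})
#   dtf_test_p = ts_test.reset_index().rename(columns={"date": "ds", "sales": "y"})
#   dtf_out, prophet_model = fit_prophet(dtf_train_p, dtf_test_p, freq="D", conf=0.95)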
'''
Forecast unknown future.
:parameter
:param ts: pandas series
:param model: model object
:param pred_ahead: number of observations to forecast (ex. pred_ahead=30)
:param end: string - date to forecast (ex. end="2016-12-31")
:param freq: None or str - 'B' business day, 'D' daily, 'W' weekly, 'M' monthly, 'A' annual, 'Q' quarterly
:param zoom: for plotting
'''
def forecast_prophet(dtf, model=None, pred_ahead=None, end=None, freq="D", conf=0.95, zoom=30, figsize=(6,6)):
## model
model = Prophet() if model is None else model
## fit
model.fit(dtf)
## index
index = utils_generate_indexdate(start=dtf["ds"].values[-1], end=end, n=pred_ahead, freq=freq)
## forecast
dtf_prophet = model.make_future_dataframe(periods=len(index), freq=freq, include_history=True)
dtf_prophet = model.predict(dtf_prophet)
dtf = dtf.merge(dtf_prophet[["ds","yhat"]], how="left").rename(columns={'yhat':'model', 'y':'ts'}).set_index("ds")
preds = pd.DataFrame(data=index, columns=["ds"])
preds = preds.merge(dtf_prophet[["ds","yhat","yhat_lower","yhat_upper"]], how="left").rename(
columns={'yhat':'forecast', 'yhat_lower':'lower', 'yhat_upper':'upper'}).set_index("ds")
dtf = dtf.append(preds)
print(preds)
## plot
dtf = utils_add_forecast_int(dtf, conf=conf, zoom=zoom, title="Prophet", figsize=figsize)
return dtf
|
# pylint: disable=missing-docstring,redefined-outer-name
import pathlib
import pytest
from tox_constraints import git_filter
@pytest.fixture
def sample():
dirpath = pathlib.Path(__file__).with_suffix("")
filepath = dirpath / "constraints.txt"
return filepath.read_text()
def test_roundtrip(sample):
assert git_filter.clean_text(git_filter.smudge_text(sample)) == sample
@pytest.mark.parametrize("func", [git_filter.clean_text, git_filter.smudge_text])
def test_filters_are_idempotent(sample, func):
expected = func(sample)
actual = func(sample)
assert actual == expected
def test_smudge_removed_marker(sample):
evidence = "$" # the presence of this string is evidence that the marker is present
    if evidence not in sample:
raise RuntimeError("Evidence not present before test")
assert evidence not in git_filter.smudge_text(sample)
def test_smudge_adds_url(sample):
evidence = (
"file://" # the presence of this string is evidence that the url is present
)
if evidence in sample:
raise RuntimeError("Evidence present before test")
assert evidence in git_filter.smudge_text(sample)
|
# Imports
from os.path import join
import matplotlib.pyplot as plt
import nibabel as nib
from nilearn import plotting
from gclda.model import Model
from gclda.decode import decode_continuous
from gclda.utils import get_resource_path
# Files to decode
f_in = '/home/data/nbc/physics-learning/data/group-level/fci/fci-postOnly_diffmod_s123_difficulty_c123456_vtrhes_p001_cthres_p05.gfeat/cope1.feat/stats/zstat1.nii.gz'
# Output filename
f_out = 'fci-cntrl_diffmod_gcldaDecoded.csv'
# Load model and initialize decoder
model_file = join('/home/data/nbc/tools/gclda/data/models/', 'Neurosynth2015Filtered2',
'model_200topics_2015Filtered2_10000iters.pklz')
model = Model.load(model_file)
# Read in image to decode
file_to_decode = f_in
img_to_decode = nib.load(file_to_decode)
#fig = plotting.plot_stat_map(img_to_decode, display_mode='z',
# threshold=3.290527,
# cut_coords=[-28, -4, 20, 50])
# Decode ROI
df, topic_weights = decode_continuous(model, img_to_decode)
# Get associated terms
df = df.sort_values(by='Weight', ascending=False)
print(df.head(10))
df.to_csv(f_out, index_label='Term')
# Plot topic weights
fig2, ax2 = plt.subplots()
ax2.plot(topic_weights)
ax2.set_xlabel('Topic #')
ax2.set_ylabel('Weight')
fig2.show()
|
from abc import ABC, abstractmethod
from typing import List, Union
import numpy as np
import torch
SOBEL_X = (
torch.tensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]], dtype=torch.float).unsqueeze(0).unsqueeze(0)
)
SOBEL_Y = (
torch.tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], dtype=torch.float).unsqueeze(0).unsqueeze(0)
)
class CustomTransformNumpy(ABC):
"""Abstract method for custom numpy transformations.
Every subclass should implement `__init__` for
transformations parameters setting and `__call__` method for application to image.
"""
@abstractmethod
def __init__(self):
raise NotImplementedError
@abstractmethod
def __call__(self, *args, **kwargs):
raise NotImplementedError
class Normalization(CustomTransformNumpy):
def __init__(
self,
sample: List[np.ndarray] = None,
from_min: float = None,
from_max: float = None,
to_min: float = None,
to_max: float = None,
):
self.to_min, self.to_max = to_min, to_max
self.to_span = self.to_max - self.to_min
if sample:
sample = np.concatenate(sample)
self.from_min = np.min(sample)
self.from_max = np.max(sample)
else:
assert (from_min is not None) and (from_max is not None)
self.from_min = from_min
self.from_max = from_max
self.from_span = self.from_max - self.from_min
def __call__(self, volume: np.ndarray) -> np.ndarray:
""" min max normalization"""
scaled = (volume - self.from_min) / self.from_span
return scaled * self.to_span + self.to_min
def denorm(self, volume: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
"""Denormalization with pre-saved stats"""
scaled = (volume - self.to_min) / self.to_span
return scaled * self.from_span + self.from_min
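# Example (illustrative, values are placeholders): rescale raw intensities into [0, 1] and back.
#
#   norm = Normalization(from_min=-1000.0, from_max=400.0, to_min=0.0, to_max=1.0)
#   scaled = norm(volume)           # volume: np.ndarray of raw intensities
#   restored = norm.denorm(scaled)  # inverse transform, also accepts torch tensors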
class AugmentationNoduleDict(ABC):
def __init__(self, autoupd=True):
self.autoupd = autoupd
def __call__(self, nodule: torch.Tensor) -> torch.Tensor:
"""Transform data
Parameters
----------
nodule : torch.Tensor, [C D H W]
input tensor
Returns
-------
torch.Tensor, [C D H W]
"""
tensor = self._augmentation(nodule)
if self.autoupd:
self._update()
return tensor
@abstractmethod
def _augmentation(self, tensor: torch.Tensor) -> torch.Tensor:
pass
@abstractmethod
def _update(self):
pass
class RotNodule3D(AugmentationNoduleDict):
def __init__(self, dims_pair: list = None, autoupd=True):
super().__init__(autoupd)
self._dims_pair = dims_pair
if dims_pair is None:
self._dims_pair = [[1, 2], [1, 3], [2, 3]]
else:
self._dims_pair = [dims_pair]
self._count_dims = len(self._dims_pair)
self._count_rotate = [0] * self._count_dims
def _augmentation(self, tensor: torch.Tensor) -> torch.Tensor:
for i in range(self._count_dims):
dims = self._dims_pair[i]
count_rotate = self._count_rotate[i]
if count_rotate == 0:
continue
tensor = torch.rot90(tensor, count_rotate, dims)
return tensor
def _update(self):
self._count_rotate = np.random.randint(-3, 4, self._count_dims).tolist()
class FlipNodule3D(AugmentationNoduleDict):
def __init__(self, dims: list = None, autoupd=True):
super().__init__(autoupd)
if dims is None:
self._dims = [1, 2, 3]
else:
self._dims = dims
self._count_dims = len(self._dims)
self._need_flip = [0] * self._count_dims
def _augmentation(self, tensor: torch.Tensor) -> torch.Tensor:
for i in range(self._count_dims):
dim = self._dims[i]
need_flip = self._need_flip[i]
if need_flip == 0:
continue
tensor = torch.flip(tensor, [dim])
return tensor
def _update(self):
self._need_flip = np.random.randint(0, 2, self._count_dims).tolist()
class TranslateNodule3D(AugmentationNoduleDict):
def __init__(self, dims: list = None, shift_val: int = 10, autoupd=True):
super().__init__(autoupd)
if dims is None:
self._dims = [1, 2, 3]
else:
self._dims = dims
self._count_dims = len(self._dims)
self._shifts = [0] * self._count_dims
self._shift_val = shift_val
def _augmentation(self, tensor: torch.Tensor) -> torch.Tensor:
tensor = torch.roll(tensor, self._shifts, self._dims)
if self._shifts[0] < 0:
tensor[:, :, tensor.size()[1] + self._shifts[0] :] = 0
elif self._shifts[0] > 0:
tensor[:, :, : self._shifts[0]] = 0
if self._shifts[1] < 0:
tensor[:, :, :, tensor.size()[2] + self._shifts[1] :] = 0
elif self._shifts[1] > 0:
tensor[:, :, :, : self._shifts[1]] = 0
if self._shifts[2] < 0:
tensor[:, :, :, :, tensor.size()[3] + self._shifts[2] :] = 0
elif self._shifts[2] > 0:
tensor[:, :, :, :, : self._shifts[2]] = 0
return tensor
def _update(self):
self._shifts = np.random.randint(-self._shift_val, self._shift_val, self._count_dims).tolist()
self._shifts[2] = 0
class CropCenterNodule3D(AugmentationNoduleDict):
def __init__(self, final_size: float):
super().__init__()
self.final_size = final_size
def _augmentation(self, tensor: torch.Tensor) -> torch.Tensor:
sizes_per_dim = np.array(tensor.size()[1:])
sizes_per_dim -= self.final_size
sizes_per_dim //= 2
sizes_per_dim_shifted = sizes_per_dim + self.final_size
return tensor[
:,
sizes_per_dim[0] : sizes_per_dim_shifted[0],
sizes_per_dim[1] : sizes_per_dim_shifted[1],
sizes_per_dim[2] : sizes_per_dim_shifted[2],
]
def _update(self):
pass
def heaviside(mask, eps=1e-5):
return 1 / 2 * (1 + (2 / np.pi) * (torch.atan(mask / eps)))
def img_derivative(input: torch.FloatTensor, sobel_kernel: torch.FloatTensor) -> torch.FloatTensor:
assert input.dim() == 4
assert sobel_kernel.dim() == 4
conv = torch.nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1, bias=False)
conv.weight = torch.nn.Parameter(sobel_kernel.type_as(input), requires_grad=False)
return conv(input) # [N, C, H, W]
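# Example (illustrative): horizontal and vertical image gradients with the Sobel kernels
# defined at the top of this module; `img` must be a float tensor of shape [N, 1, H, W].
#
#   img = torch.rand(1, 1, 64, 64)
#   grad_x = img_derivative(img, SOBEL_X)  # derivative along x (vertical edges)
#   grad_y = img_derivative(img, SOBEL_Y)  # derivative along y (horizontal edges)
#   grad_mag = torch.sqrt(grad_x ** 2 + grad_y ** 2)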
|
print('---' * 10)
print('Analisador de Triângulos')
print('---' * 10)
r1 = int(input('Primeiro valor: '))
r2 = int(input('Segundo valor: '))
r3 = int(input('Terceiro valor: '))
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
print('Os segmentos acima FORMAM um triângulo')
else:
print('Os segmentos acima NÃO PODEM FORMAR um triângulo') |
"""
Cookie Clicker Simulator
"""
import simpleplot
# Used to increase the timeout, if necessary
import codeskulptor
codeskulptor.set_timeout(20)
import poc_clicker_provided as provided
import math
# Constants
SIM_TIME = 10000000000.0
class ClickerState:
"""
Simple class to keep track of the game state.
"""
def __init__(self):
self._cookies = 0.0
self._cps = 1.0
self._time = 0.0
self._total = 0.0
self._history = [(0.0, None, 0.0, 0.0)]
def __str__(self):
"""
Return human readable state
"""
status_repr = "total: " + str(self.get_total()) + "\n now: "
status_repr += str(self.get_cookies()) + "\n time: " + str(self.get_time())
status_repr += "\n CPS: " + str(self.get_cps()) + "\n"
return status_repr
def get_cookies(self):
"""
Return current number of cookies
(not total number of cookies)
Should return a float
"""
return self._cookies
def get_cps(self):
"""
Get current CPS
Should return a float
"""
return self._cps
def get_time(self):
"""
Get current time
Should return a float
"""
return self._time
def get_history(self):
"""
Return history list
"""
return self._history
def get_total(self):
"""
Return total number of cookies
"""
return self._total
def add_cookies(self, cookies):
"""
Add the number of current cookies and total number
of cookies
"""
self._cookies += cookies
self._total += cookies
def operate_buy(self, cost, cps):
"""
Charge cookies and add CPS
"""
self._cps += cps
self._cookies -= cost
def add_time(self, time):
"""
Add current time
"""
self._time += time
def append_history(self, history_item):
"""
Add history to history list
"""
self._history.append(history_item)
def time_until(self, cookies):
"""
Return time until you have the given number of cookies
(could be 0 if you already have enough cookies)
Should return a float with no fractional part
"""
if (self.get_cookies() >= cookies):
return 0.0
else:
return math.ceil((cookies - self.get_cookies()) / self.get_cps())
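    # Worked example (illustrative): with 2.0 cookies in hand, a CPS of 4.0 and a target of
    # 10.0 cookies, the deficit is 8.0 and time_until returns math.ceil(8.0 / 4.0) = 2.0.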
def wait(self, time):
"""
Wait for given amount of time and update state
Should do nothing if time <= 0
"""
        if (time > 0):
cookies_earned = time * self.get_cps()
self.add_time(time)
self.add_cookies(cookies_earned)
def buy_item(self, item_name, cost, additional_cps):
"""
Buy an item and update state
Should do nothing if you cannot afford the item
"""
if (self.get_cookies() >= cost):
self.operate_buy(cost, additional_cps)
self.append_history((self.get_time(), item_name, cost, self.get_total()))
def simulate_clicker(build_info, duration, strategy):
"""
Function to run a Cookie Clicker game for the given
duration with the given strategy. Returns a ClickerState
object corresponding to game.
"""
ava_builds = build_info.clone()
clicker = ClickerState()
while (clicker.get_time() <= duration):
item = strategy(clicker.get_cookies(), clicker.get_cps(),
duration - clicker.get_time(), ava_builds)
if (item):
cost = ava_builds.get_cost(item)
next_time = clicker.time_until(cost)
if (next_time + clicker.get_time() > duration):
break
clicker.wait(next_time)
ava_builds.update_item(item)
clicker.buy_item(item, cost, ava_builds.get_cps(item))
else:
break
clicker.wait(duration - clicker.get_time())
return clicker
def strategy_cursor(cookies, cps, time_left, build_info):
"""
Always pick Cursor!
Note that this simplistic strategy does not properly check whether
it can actually buy a Cursor in the time left. Your strategy
functions must do this and return None rather than an item you
can't buy in the time left.
"""
return "Cursor"
def strategy_none(cookies, cps, time_left, build_info):
"""
Always return None
This is a pointless strategy that you can use to help debug
your simulate_clicker function.
"""
return None
def strategy_cheap(cookies, cps, time_left, build_info):
"""
Returns the cheapest item in buildable list.
"""
cost = float("inf")
choice = None
for item in build_info.build_items():
if (build_info.get_cost(item) < cost):
temp_cost = build_info.get_cost(item)
if (time_left >= (temp_cost - cookies) / cps):
cost = temp_cost
choice = item
return choice
def strategy_expensive(cookies, cps, time_left, build_info):
"""
Returns the most expensive item in buildable list.
"""
cost = float("-inf")
choice = None
for item in build_info.build_items():
if (build_info.get_cost(item) > cost):
temp_cost = build_info.get_cost(item)
if (time_left >= (temp_cost - cookies) / cps):
cost = temp_cost
choice = item
return choice
def strategy_best(cookies, cps, time_left, build_info):
"""
Returns the best choice.
"""
ratio = 0
choice = None
for item in build_info.build_items():
ratio_to_compare = build_info.get_cps(item) / build_info.get_cost(item)
if (ratio_to_compare > ratio):
temp_cost = build_info.get_cost(item)
if (time_left >= (temp_cost - cookies) / cps):
ratio = ratio_to_compare
choice = item
return choice
def run_strategy(strategy_name, time, strategy):
"""
Run a simulation with one strategy
"""
state = simulate_clicker(provided.BuildInfo(), time, strategy)
print strategy_name, ":\n", state
# Plot total cookies over time
# Uncomment out the lines below to see a plot of total cookies vs. time
# Be sure to allow popups, if you do want to see it
# history = state.get_history()
# history = [(item[0], item[3]) for item in history]
# simpleplot.plot_lines(strategy_name, 1000, 400, 'Time', 'Total Cookies', [history], True)
def run():
"""
Run the simulator.
"""
run_strategy("Cursor", SIM_TIME, strategy_cursor)
run_strategy("Cheap", SIM_TIME, strategy_cheap)
run_strategy("Expensive", SIM_TIME, strategy_expensive)
run_strategy("Best", SIM_TIME, strategy_best)
# Add calls to run_strategy to run additional strategies
# run_strategy("Cheap", SIM_TIME, strategy_cheap)
# run_strategy("Expensive", SIM_TIME, strategy_expensive)
# run_strategy("Best", SIM_TIME, strategy_best)
run()
|
""" Library of submission strings
"""
from submission import substr
from submission import read_dat
__all__ = [
'substr',
'read_dat'
]
|
#!/usr/bin/env python
import logging
import sys
import unittest
import scipy as sp
import numpy as np
import mango.mpi as mpi
import mango.image
import mango.data
import mango.io
logger, rootLogger = mpi.getLoggers(__name__)
class CropTest(unittest.TestCase):
def setUp(self):
np.random.seed((mango.mpi.rank+1)*975421)
subdShape = sp.array((16,64,32))
mpiCartShape = mango.mpi.getCartShape(dimension=3)
mpiCartShape = sp.array(mpiCartShape)
self.imgShape = mpiCartShape*subdShape
def getSteppedShape(self, shape, step):
return sp.array([len(range(0, shape[i], step[i])) for i in range(len(shape))])
def doTestCropWithHalo(self, haloSz=0):
rootLogger.info("*************************")
rootLogger.info("haloSz=%s" % haloSz)
if (isinstance(haloSz, int) or ((sys.version_info.major < 3) and isinstance(haloSz, long))):
if (haloSz < 0):
haloSz = 0
haloSz = sp.array((haloSz,)*3)
imgDds = mango.data.gaussian_noise(shape=self.imgShape, mtype="tomo_float", halo=haloSz)
imgDds.setBorderToValue(0)
imgDds.updateOverlapRegions()
imgDds.md.setVoxelSize((1,1,1));
imgDds.md.setVoxelSizeUnit("mm");
logger.info("imgDds.mtype=%s" % imgDds.mtype)
logger.info("imgDds.md.getVoxelSize()=%s" % imgDds.md.getVoxelSize())
cropDds = \
mango.image.crop(
imgDds,
offset=(0,0,0),
shape = imgDds.shape
)
logger.info("imgDds.shape=%s" % imgDds.shape)
logger.info("cropDds.shape=%s" % cropDds.shape)
slc = []
for d in range(len(haloSz)):
slc.append(slice(haloSz[d], cropDds.asarray().shape[d]-haloSz[d]))
slc = tuple(slc)
self.assertEqual(imgDds.dtype, cropDds.dtype)
self.assertEqual(imgDds.mtype, cropDds.mtype)
self.assertTrue(sp.all(imgDds.halo == cropDds.halo))
self.assertTrue(sp.all(imgDds.origin == cropDds.origin), "%s != %s" % (imgDds.origin, cropDds.origin))
self.assertTrue(sp.all(imgDds.mpi.shape == cropDds.mpi.shape))
self.assertTrue(sp.all(imgDds.md.getVoxelSize() == cropDds.md.getVoxelSize()))
logger.info("imgDds min = %s, imgDds max = %s" % (np.min(imgDds.asarray()), np.max(imgDds.asarray())))
logger.info("cropDds min = %s, cropDds max = %s" % (np.min(cropDds.asarray()[slc]), np.max(cropDds.asarray()[slc])))
logger.info("num non-zero cropDds = %s" % sp.sum(sp.where(cropDds.asarray()[slc] != 0, 1, 0)))
self.assertTrue(sp.all(imgDds.asarray()[slc] == cropDds.asarray()[slc]))
imgDds = \
mango.data.gaussian_noise(
mean=32000., stdd=2000.,
shape=self.imgShape,
dtype="uint16",
halo=haloSz,
origin=(2,-8,4)
)
imgDds.setBorderToValue(32000)
imgDds.updateOverlapRegions()
imgDds.md.setVoxelSize((1,1,1));
imgDds.md.setVoxelSizeUnit("mm");
cropDds = \
mango.image.crop(
imgDds,
offset = imgDds.shape//4,
shape = imgDds.shape//2
)
slc = []
for d in range(len(haloSz)):
slc.append(slice(haloSz[d], cropDds.asarray().shape[d]-haloSz[d]))
slc = tuple(slc)
logger.info("imgDds.md.getVoxelSize()=%s%s" % (imgDds.md.getVoxelSize(), imgDds.md.getVoxelSizeUnit()))
logger.info("cropDds.md.getVoxelSize()=%s%s" % (cropDds.md.getVoxelSize(), cropDds.md.getVoxelSizeUnit()))
self.assertEqual(imgDds.dtype, cropDds.dtype)
self.assertEqual(imgDds.mtype, cropDds.mtype)
self.assertTrue(sp.all(imgDds.halo == cropDds.halo))
self.assertTrue(sp.all(imgDds.shape//2 == cropDds.shape))
self.assertTrue(sp.all(imgDds.origin+imgDds.shape//4 == cropDds.origin), "%s != %s" % (imgDds.origin, cropDds.origin))
self.assertTrue(sp.all(imgDds.mpi.shape == cropDds.mpi.shape))
self.assertTrue(sp.all(imgDds.md.getVoxelSize() == cropDds.md.getVoxelSize()))
logger.info("imgDds min = %s, imgDds max = %s" % (np.min(imgDds.asarray()), np.max(imgDds.asarray())))
logger.info("cropDds min = %s, cropDds max = %s" % (np.min(cropDds.asarray()[slc]), np.max(cropDds.asarray()[slc])))
logger.info("num non-zero cropDds = %s" % sp.sum(sp.where(cropDds.asarray()[slc] != 0, 1, 0)))
cropDds = \
mango.image.crop(
imgDds,
offset=(3,5,7),
shape=(imgDds.shape[0]-2, imgDds.shape[1]-8, imgDds.shape[2]-4)
)
slc = []
for d in range(len(haloSz)):
slc.append(slice(haloSz[d], cropDds.asarray().shape[d]-haloSz[d]))
slc = tuple(slc)
logger.info("imgDds min = %s, imgDds max = %s" % (np.min(imgDds.asarray()), np.max(imgDds.asarray())))
logger.info("cropDds min = %s, cropDds max = %s" % (np.min(cropDds.asarray()[slc]), np.max(cropDds.asarray()[slc])))
logger.info("num non-zero cropDds = %s" % sp.sum(sp.where(cropDds.asarray()[slc] != 0, 1, 0)))
self.assertEqual(imgDds.dtype, cropDds.dtype)
self.assertEqual(imgDds.mtype, cropDds.mtype)
self.assertTrue(sp.all(imgDds.halo == cropDds.halo))
        self.assertTrue(sp.all(imgDds.shape-(2,8,4) == cropDds.shape), "%s != %s" % (imgDds.shape-(2,8,4), cropDds.shape))
self.assertTrue(sp.all(imgDds.origin+(3,5,7) == cropDds.origin), "%s != %s" % (imgDds.origin, cropDds.origin))
self.assertTrue(sp.all(imgDds.mpi.shape == cropDds.mpi.shape))
self.assertTrue(sp.all(imgDds.md.getVoxelSize() == cropDds.md.getVoxelSize()))
rootLogger.info("*************************")
def testCropWithHalo0(self):
self.doTestCropWithHalo(0)
def testCropWithHalo2(self):
self.doTestCropWithHalo(4)
if __name__ == "__main__":
mango.setLoggingVerbosityLevel("high")
mpi.initialiseLoggers(
[__name__, "mango.mpi", "mango.image", "mango.imageTest"],
logLevel=logging.INFO
)
unittest.main()
|
from django.db import models
class Category(models.Model):
"""Model definition for Category."""
# TODO: Define fields here
name = models.CharField(max_length=50)
description = models.TextField()
products = models.ManyToManyField(
'Product', through='ProductCategory')
class Meta:
"""Meta definition for Category."""
verbose_name = 'Category'
        verbose_name_plural = 'Categories'
def __str__(self):
"""Unicode representation of Category."""
return self.name
# def save(self):
# """Save method for Category."""
# pass
def get_absolute_url(self):
"""Return absolute url for Category."""
return ('')
# TODO: Define custom methods here
|
import json, os
from core.models.client import Client
from discord_slash import SlashCommand
main_path = os.path.dirname(__file__)
config_path = os.path.join(main_path, "config.json")
with open(config_path, "r") as jsonfile:
config: dict = json.load(jsonfile)
if __name__ == "__main__":
Client(**config).run()
|
# -*- coding: utf-8 -*-
'''Defines file types and implements commands on files
'''
# module imports
from . import cli, get_user_context_obj, logger
from .common import *
|
## /!\ DO NOT TOUCH OR DELETE THIS FILE /!\ ##
import discord
from replit import db
client = discord.Client()
commands = {}
@client.event
async def on_message(message, *member: discord.User):
if message.content.split(" ")[0] in commands.keys():
code = commands[message.content.split(" ")[0]]
latency = client.latency * 1000
executecode = code.replace('$channelID', str(message.channel.id))
pingreplace = executecode.replace('$ping', str(round(latency)))
serverreplace = pingreplace.replace('$serversCount', str(len(client.guilds)))
authorreplace = serverreplace.replace('$authorID', str(message.author.id))
usernamereplace = authorreplace.replace('$username', str(message.author))
commandreplace = usernamereplace.replace('$commandCount', str(len(commands)))
usersreplace = commandreplace.replace('$usersCount', str(len(client.users)))
versionreplace = usersreplace.replace('$version', str(discord.__version__))
await message.channel.send(versionreplace)
def command(name, code):
commands[name] = code
def setstatus(name):
db['status'] = name
@client.event
async def on_ready():
status = db['status']
print(f"Connexion au bot {client.user.name}")
print('----------')
print(f'Changement du status pour : Joue a {status}')
print('----------')
await client.change_presence(activity=discord.Game(name=status))
def login(token):
client.run(token)
|
import argparse
import pickle as pkl
import numpy as np
import tensorflow as tf
import params
import model
FLAGS = None
def remove_eos(sentence, eos = '<EOS>', pad = '<PAD>'):
if eos in sentence:
return sentence[:sentence.index(eos)] + '\n'
elif pad in sentence:
return sentence[:sentence.index(pad)] + '\n'
else:
return sentence + '\n'
def write_result(predict_results, dic_dir):
print 'Load dic file...'
with open(dic_dir) as dic:
dic_file = pkl.load(dic)
reversed_dic = dict((y,x) for x,y in dic_file.iteritems())
print 'Writing into file...'
with open(FLAGS.pred_dir, 'w') as f:
while True:
try :
output = predict_results.next()
output = output['question'].tolist()
if -1 in output: # beam search
output = output[:output.index(-1)]
indices = [reversed_dic[index] for index in output]
sentence = ' '.join(indices)
sentence = remove_eos(sentence)
f.write(sentence.encode('utf-8'))
except StopIteration:
break
def main(unused):
# Enable logging for tf.estimator
tf.logging.set_verbosity(tf.logging.INFO)
# Config
config = tf.contrib.learn.RunConfig(
model_dir = FLAGS.model_dir,
keep_checkpoint_max = 10,
save_checkpoints_steps = 100)
# Load parameters
model_params = getattr(params, FLAGS.params)().values()
# Define estimator
q_generation = model.q_generation(model_params)
nn = tf.estimator.Estimator(model_fn=q_generation.run, config = config, params=model_params)
# Load training data
train_sentence = np.load(FLAGS.train_sentence) # train_data
train_question = np.load(FLAGS.train_question) # train_label
# Data shuffling for training data
permutation = np.random.permutation(len(train_sentence))
train_sentence = train_sentence[permutation]
train_question = train_question[permutation]
# Training input function for estimator
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'enc_inputs': train_sentence, 'dec_inputs': train_question},
y=None, # useless value
batch_size = model_params['batch_size'],
num_epochs=FLAGS.num_epochs,
shuffle=True)
# Load evaluation data
eval_sentence = np.load(FLAGS.eval_sentence)
eval_question = np.load(FLAGS.eval_question)
# Evaluation input function for estimator
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x = {'enc_inputs': eval_sentence, 'dec_inputs': eval_question},
y = None,
batch_size = model_params['batch_size'],
num_epochs=1,
shuffle=False)
# define experiment
exp_nn = tf.contrib.learn.Experiment(
estimator = nn,
train_input_fn = train_input_fn,
eval_input_fn = eval_input_fn,
train_steps = None,
min_eval_frequency = 100)
# train and evaluate
if FLAGS.mode == 'train':
exp_nn.train_and_evaluate()
elif FLAGS.mode == 'eval':
exp_nn.evaluate(delay_secs = 0)
else: # 'pred'
# Load test data
test_sentence = np.load(FLAGS.test_sentence)
# prediction input function for estimator
pred_input_fn = tf.estimator.inputs.numpy_input_fn(
x = {'enc_inputs' : test_sentence},
y = None,
batch_size = model_params['batch_size'],
num_epochs = 1,
shuffle = False)
# prediction
predict_results = nn.predict(input_fn = pred_input_fn)
# write result(question) into file
write_result(predict_results, FLAGS.dic_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type = str, help = 'train, eval or pred')
parser.add_argument('--train_sentence', type = str, default= '', help = 'path to the training sentence.')
parser.add_argument('--train_question', type = str, default = '', help = 'path to the training question.')
parser.add_argument('--eval_sentence', type = str, default = '', help = 'path to the evaluation sentence. ')
parser.add_argument('--eval_question', type = str, default = '', help = 'path to the evaluation question.')
parser.add_argument('--test_sentence', type = str, default = '', help = 'path to the test sentence.')
parser.add_argument('--dic_dir', type = str, help = 'path to the dictionary')
parser.add_argument('--model_dir', type = str, help = 'path to save the model')
parser.add_argument('--pred_dir', type = str, help = 'path to save the predictions')
parser.add_argument('--params', type = str, help = 'parameter setting')
parser.add_argument('--num_epochs', type = int, default = 10, help = 'training epoch size')
FLAGS = parser.parse_args()
tf.app.run(main)
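# Example invocation (script name, file paths and the parameter-set name are placeholders):
# python <this_script>.py --mode train --params basic_params \
#     --train_sentence data/train_sentence.npy --train_question data/train_question.npy \
#     --eval_sentence data/eval_sentence.npy --eval_question data/eval_question.npy \
#     --dic_dir data/vocab.dic --model_dir ./store_model --pred_dir ./result.txt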
|
import logging
import blueforge.apis.telegram as tg
import requests
from blueforge.apis.facebook import Message, ImageAttachment, QuickReply, QuickReplyTextItem
from chatbrick.util import get_items_from_xml, UNKNOWN_ERROR_MSG
import time
from blueforge.apis.facebook import TemplateAttachment, Element, GenericTemplate
logger = logging.getLogger(__name__)
BRICK_DEFAULT_IMAGE = 'https://www.chatbrick.io/api/static/brick/img_brick_10_001.png'
class Emergency(object):
def __init__(self, fb, brick_db):
self.brick_db = brick_db
self.fb = fb
async def facebook(self, command):
if command == 'get_started':
# send_message = [
# Message(
# attachment=ImageAttachment(
# url=BRICK_DEFAULT_IMAGE
# )
# ),
# Message(
# text='중앙응급의료센터에서 제공하는 "응급실검색 서비스"에요.'
# )
# ]
send_message = [
Message(
attachment=TemplateAttachment(
payload=GenericTemplate(
elements=[
Element(image_url=BRICK_DEFAULT_IMAGE,
title='응급실검색 서비스',
subtitle='중앙응급의료센터에서 제공하는 "응급실검색 서비스"에요.')
]
)
)
)
]
await self.fb.send_messages(send_message)
await self.brick_db.save()
elif command == 'final':
input_data = await self.brick_db.get()
state = input_data['store'][0]['value']
town = input_data['store'][1]['value']
res = requests.get(
url='http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytListInfoInqire?serviceKey=%s&Q0=%s&Q1=%s&ORD=NAME&pageNo=1&startPage=1&numOfRows=3&pageSize=3' % (
input_data['data']['api_key'], state, town))
items = get_items_from_xml(res)
if type(items) is dict:
if items.get('code', '00') == '99' or items.get('code', '00') == '30':
send_message = [
Message(
text='chatbrick 홈페이지에 올바르지 않은 API key를 입력했어요. 다시 한번 확인해주세요.',
)
]
else:
send_message = [
Message(
text=UNKNOWN_ERROR_MSG
)
]
else:
if len(items) == 0:
send_message = [
Message(
text='조회된 결과가 없습니다.',
quick_replies=QuickReply(
quick_reply_items=[
QuickReplyTextItem(
title='다른 지역검색',
payload='brick|emergency|get_started'
)
]
)
)
]
else:
send_message = [
Message(
text='조회된 결과에요'
),
Message(
text='{dutyName}\n{dutyEmclsName}\n{dutyAddr}\n{dutyTel1}\n{dutyTel3}'.format(
**items[0]),
quick_replies=QuickReply(
quick_reply_items=[
QuickReplyTextItem(
title='다른 지역검색',
payload='brick|emergency|get_started'
)
]
)
)
]
if len(items) > 1:
for surplus_item in items[1:]:
send_message.insert(1, Message(
text='{dutyName}\n{dutyEmclsName}\n{dutyAddr}\n{dutyTel1}\n{dutyTel3}'.format(
**surplus_item)
)
)
await self.brick_db.delete()
await self.fb.send_messages(send_message)
return None
async def telegram(self, command):
if command == 'get_started':
send_message = [
tg.SendPhoto(
photo=BRICK_DEFAULT_IMAGE
),
tg.SendMessage(
text='중앙응급의료센터에서 제공하는 "응급실검색 서비스"에요.'
)
]
await self.fb.send_messages(send_message)
await self.brick_db.save()
elif command == 'final':
input_data = await self.brick_db.get()
state = input_data['store'][0]['value']
town = input_data['store'][1]['value']
res = requests.get(
url='http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytListInfoInqire?serviceKey=%s&Q0=%s&Q1=%s&ORD=NAME&pageNo=1&startPage=1&numOfRows=3&pageSize=3' % (
input_data['data']['api_key'], state, town))
items = get_items_from_xml(res)
if type(items) is dict:
if items.get('code', '00') == '99' or items.get('code', '00') == '30':
send_message = [
tg.SendMessage(
text='chatbrick 홈페이지에 올바르지 않은 API key를 입력했어요. 다시 한번 확인해주세요.',
)
]
else:
send_message = [
tg.SendMessage(
text=UNKNOWN_ERROR_MSG
)
]
else:
if len(items) == 0:
send_message = [
tg.SendMessage(
text='조회된 결과가 없습니다.'
)
]
else:
send_message = [
tg.SendMessage(
text='조회된 결과에요.'
),
tg.SendMessage(
text='*{dutyName}*\n{dutyEmclsName}\n{dutyAddr}\n{dutyTel1}\n{dutyTel3}'.format(
**items[0]),
parse_mode='Markdown',
reply_markup=tg.MarkUpContainer(
inline_keyboard=[
[
tg.CallbackButton(
text='다른 지역검색',
callback_data='BRICK|emergency|get_started'
)
]
]
)
)
]
if len(items) > 1:
for surplus_item in items[1:]:
send_message.insert(1, tg.SendMessage(
text='*{dutyName}*\n{dutyEmclsName}\n{dutyAddr}\n{dutyTel1}\n{dutyTel3}'.format(
**surplus_item),
parse_mode='Markdown'
)
)
await self.brick_db.delete()
await self.fb.send_messages(send_message)
return None
|
from .persistence_provider import IPersistenceProvider
from .lock_provider import ILockProvider
from .queue_provider import IQueueProvider, EVENT_QUEUE, WORKFLOW_QUEUE
from .background_service import IBackgroundService
from .execution_pointer_factory import IExecutionPointerFactory
from .execution_result_processor import IExecutionResultProcessor
from .workflow_host import IWorkflowHost
from .workflow_registry import IWorkflowRegistry
from .workflow_executor import IWorkflowExecutor
|
import textwrap
import sys
from datetime import datetime
HEADER = """\
from zic.classes import *
from datetime import *
"""
RAW_FILES = [
'africa', 'antarctica', 'asia', 'australasia',
'europe', 'northamerica', 'southamerica'
]
def lines(input):
"""Remove comments and empty lines"""
for raw_line in input:
line = raw_line.strip()
if line and not line.startswith('#'):
yield strip_comments(line)
def strip_comments(line):
quoted = False
for i, c in enumerate(line):
if c == '"':
quoted = not quoted
elif c == "#" and not quoted:
return line[:i].strip()
return line
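# Examples (illustrative lines): an unquoted trailing comment is stripped, while a
# '#' inside double quotes is preserved.
#   strip_comments('Zone Foo/Bar 1:00 - CET # local mean time') -> 'Zone Foo/Bar 1:00 - CET'
#   strip_comments('Link "A#B" Target')                         -> 'Link "A#B" Target'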
OBSERVANCE_TEMPLATE = """\
Observance(gmtoff={},
rules={},
format='{}',
until={}),
"""
def compile(infile, outfile=None):
with open(infile) as input:
if outfile is None:
compile_stream(input, sys.stdout)
else:
with open(outfile, 'w') as output:
compile_stream(input, output)
def compile_stream(input, output, header=HEADER):
output.write(header)
observances = state = None
zones = {}
rules = {}
for line in lines(input):
fields = line.split()
if fields[0] == 'Zone':
names = fields[1].split('/')
z = zones
for name in names:
z = z.setdefault(name, {})
observances = z.setdefault('observances', [])
state = 'Zone'
del fields[:2]
elif fields[0] == 'Rule':
rules.setdefault(fields[1], []).append(fields[2:])
if state == 'Zone':
gmtoff, zone_rules, format = fields[:3]
until = format_until(fields[3:])
if until is None:
state = None
observances.append(
format_observance(gmtoff, zone_rules, format, until))
print_rules(rules, file=output)
print_zones(zones, file=output)
RULE_TEMPLATE = ('Rule({}, {}, {}, {}, {},\n'
' at={},\n'
' save={}, letters={!r})')
def format_rule(begin, end, type, in_month, on, at, save, letters):
begin = int(begin)
if end == 'only':
end = begin + 1
elif end == 'max':
end = 10000
else:
end = int(end) + 1
if type == '-':
type = None
if letters == '-':
letters = ''
at = format_at(at)
save = format_time(save)
return RULE_TEMPLATE.format(begin, end, type, in_month,
on, at, save, letters)
TIME_FORMATS = ['%H', '%H:%M', "%H:%M:%S"]
TIME_TYPES = {
'w': 'wall',
'u': 'utc',
'g': 'utc',
'z': 'utc',
's': 'std',
}
def format_time(t):
if t == '-':
return 'timedelta(0)'
if t.startswith('24'):
return 'timedelta(1)'
n = t.count(':')
fmt = TIME_FORMATS[n]
t = datetime.strptime(t, fmt).time()
    args = ['hours={0.hour}', 'minutes={0.minute}', 'seconds={0.second}']
template = 'timedelta(%s)' % ', '.join(args[:n+1])
return template.format(t)
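# Examples:
#   format_time('-')     -> 'timedelta(0)'
#   format_time('1:00')  -> 'timedelta(hours=1, minutes=0)'
#   format_time('24:00') -> 'timedelta(1)'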
def format_at(at):
try:
time_type = TIME_TYPES[at[-1]]
except KeyError:
time_type = 'wall'
else:
at = at[:-1]
return '(%s, %r)' % (format_time(at), time_type)
def print_rules(rules, file):
prefix = ' ' * 8
for name, lines in rules.items():
file.write('class %s(Rules):\n'
' name ="%s"\n'
' rules = [\n' % (rules_name(name), name))
for args in lines:
rule = format_rule(*args)
file.write(textwrap.indent(rule, prefix) + ',\n')
file.write(' ]\n\n')
TIME_UNITS = 'hours', 'minutes', 'seconds'
def format_until(until):
n = len(until)
if n == 0:
return None
if n == 1:
return int(until[0])
return '(%s)' % ', '.join(repr(u) for u in until)
def format_delta(delta):
sign = ''
if delta.startswith('-'):
sign = '-'
delta = delta[1:]
args = ['%s=%s' % (unit, int(value))
for unit, value in zip(TIME_UNITS, delta.split(':'))]
return '%stimedelta(%s)' % (sign, ', '.join(args))
def format_observance(gmtoff, rules, format, until):
if rules == '-':
rules = None
elif ':' in rules:
rules = "'%s'" % rules
else:
rules = rules_name(rules)
return OBSERVANCE_TEMPLATE.format(format_delta(gmtoff),
rules, format, until)
def print_zones(zones, file, indent=0):
for name, info in sorted(zones.items()):
try:
observances = info['observances']
except KeyError:
file.write(indent * ' ' + 'class %s:\n' % name)
print_zones(info, file, indent + 4)
else:
prefix = indent * ' '
file.write(prefix + 'class %s(Zone):\n' % zone_name(name))
file.write(prefix + ' name = %r\n' % name)
file.write(prefix + ' observances = [\n')
for observance in observances:
file.write(textwrap.indent(observance, prefix + 8 * ' '))
file.write(prefix + '%s]\n' % (4 * ' '))
def rules_name(name):
return name.replace('-', '_')
zone_name = rules_name
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: zic infile [outfile]")
sys.exit(1)
if sys.argv[1] == '--all':
for f in RAW_FILES:
compile('raw/' + f, f + '.py')
else:
compile(*sys.argv[1:])
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=g-wrong-blank-lines,global-variable-not-assigned,invalid-name,redefined-outer-name,unused-variable
x = 123
def f1():
global x
x = 'abc'
f1()
assert x == 'abc'
x = 'foo'
def f2():
global x
class x(object):
pass
f2()
assert isinstance(x, type)
assert x.__name__ == 'x'
x = 3.14
class C1(object):
global x
x = 'foo'
assert x == 'foo'
x = 42
def f3():
global x
del x
f3()
try:
print(x)
raise AssertionError
except NameError:
pass
x = 'foo'
def f4():
x = 'bar'
def g():
global x
def h():
return x
return h()
return g()
assert f4() == 'foo'
x = 3.14
def f5():
x = 'foo'
class C(object):
global x
y = x
return C.y
assert f5() == 3.14
|
import unittest
import os
import configparser
from pathlib import Path
from pyshex import ShExEvaluator
from pyshex.utils.schema_loader import SchemaLoader
from rdflib import Namespace
from rdflib import Graph
from rdflib.namespace import RDF
class TestDdiemRDF(unittest.TestCase):
RDF_FILE = ""
BASE = Namespace("http://ddiem.phenomebrowser.net/schema#")
DDIEM = Namespace("http://ddiem.phenomebrowser.net/")
OBO = Namespace("http://purl.obolibrary.org/obo/")
def test_rdf(self):
config_dir = os.path.expanduser("~") + "/.config"
config_file = config_dir + "/ddiem-pipeline.ini"
config = configparser.RawConfigParser()
config.read(config_file)
data_dir = config["data"]["dir"]
shex_file = data_dir + "/ddiem.shex"
rdf = Graph()
rdf.load(data_dir + "/" + self.RDF_FILE)
shex = Path(shex_file).read_text()
schema = SchemaLoader().loads(shex)
total_failures = []
for combination_procedure in rdf.subjects(RDF.type, self.OBO.DDIEM_0000023):
failures = self.evaluate(rdf, schema, combination_procedure, self.BASE.Combination_Procedure)
if len(failures) > 0:
total_failures = total_failures + failures
for procedure in rdf.subjects(RDF.type, self.OBO.OGMS_0000112):
if (procedure, RDF.type, self.OBO.DDIEM_0000023) in rdf:
continue
failures = self.evaluate(rdf, schema, procedure, self.BASE.Procedure)
if len(failures) > 0:
total_failures = total_failures + failures
for protien in rdf.subjects(RDF.type, self.DDIEM.ProtienOrEnzyme):
failures = self.evaluate(rdf, schema, protien, self.BASE.ProtienOrEnzyme)
if len(failures) > 0:
total_failures = total_failures + failures
if len(total_failures) > 0:
content = ""
for reason in total_failures:
content = content + reason + "\n"
self.fail(f"FAIL: \n {content}")
def evaluate(self, rdf, shex, resource, shex_type):
results = ShExEvaluator().evaluate(rdf, shex, focus= resource, start=shex_type)
failures = []
for item in results:
if item.result:
print("PASS:", str(item.focus), str(item.start))
else:
failures.append(item.reason)
return failures
if __name__ == "__main__":
TestDdiemRDF.RDF_FILE = "ddiem-data.2020-04-29.rdf"
    unittest.main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import MinMaxScaler
# Set train file names
train_queries_file = "./dataset/train_queries.csv"
train_plans_file = "./dataset/train_plans.csv"
train_click_file= "./dataset/train_clicks.csv"
profiles_file = "./dataset/profiles.csv"
# Set test file names
test_queries_file = "./dataset/test_queries.csv"
test_plans_file = "./dataset/test_plans.csv"
def load_prepare_data():
# Load training data
train_queries = pd.read_csv(train_queries_file)
train_plans = pd.read_csv(train_plans_file)
train_click = pd.read_csv(train_click_file)
# Load testing data
test_queries = pd.read_csv(test_queries_file)
test_plans = pd.read_csv(test_plans_file)
# Prepare train data
train_data = train_queries.merge(train_click, on='sid', how='left')
train_data = train_data.merge(train_plans, on='sid', how='left')
test_data = test_queries.merge(test_plans, on='sid', how='left')
return train_data, test_data
def preprocess_features(train_data, test_data):
train_data = train_data.drop(['click_time'], axis=1)
train_data['click_mode'] = train_data['click_mode'].fillna(0)
test_data['click_mode'] = -1
# concat train and test sets
all_data = pd.concat([train_data, test_data], axis=0, sort=True)
all_data = all_data.drop(['plan_time'], axis=1)
all_data = all_data.reset_index(drop=True)
# Prepare OD features by spliting coordinates for each of them
all_data['o_first'] = all_data['o'].apply(lambda od: float(od.split(',')[0]))
all_data['o_second'] = all_data['o'].apply(lambda od: float(od.split(',')[1]))
all_data['d_first'] = all_data['d'].apply(lambda od: float(od.split(',')[0]))
all_data['d_second'] = all_data['d'].apply(lambda od: float(od.split(',')[1]))
all_data = all_data.drop(['o', 'd'], axis=1)
all_data['req_time'] = pd.to_datetime(all_data['req_time'])
all_data['reqweekday'] = all_data['req_time'].dt.dayofweek
all_data['reqhour'] = all_data['req_time'].dt.hour
all_data = all_data.drop(['req_time'], axis=1)
return all_data
def generate_plan_features(all_data):
n = all_data.shape[0]
mode_list_feasible = np.zeros((n, 12))
max_distance, min_distance, mean_distance, std_distance = np.zeros(
(n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
max_price, min_price, mean_price, std_price = np.zeros(
(n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
max_eta, min_eta, mean_eta, std_eta = np.zeros(
(n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
mode_min_distance, mode_max_distance, mode_min_price, mode_max_price, mode_min_eta, mode_max_eta, first_mode = np.zeros(
(n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
for i, plan in tqdm(enumerate(all_data['plans'].values)):
try:
plan_list = json.loads(plan)
except:
plan_list = []
if len(plan_list) == 0:
mode_list_feasible[i, 0] = 1
first_mode[i] = 0
max_distance[i] = -1
min_distance[i] = -1
mean_distance[i] = -1
std_distance[i] = -1
max_price[i] = -1
min_price[i] = -1
mean_price[i] = -1
std_price[i] = -1
max_eta[i] = -1
min_eta[i] = -1
mean_eta[i] = -1
std_eta[i] = -1
mode_min_distance[i] = -1
mode_max_distance[i] = -1
mode_min_price[i] = -1
mode_max_price[i] = -1
mode_min_eta[i] = -1
mode_max_eta[i] = -1
else:
distance_list = []
price_list = []
eta_list = []
mode_list = []
for tmp_dit in plan_list:
distance_list.append(int(tmp_dit['distance']))
if tmp_dit['price'] == '':
price_list.append(0)
else:
price_list.append(int(tmp_dit['price']))
eta_list.append(int(tmp_dit['eta']))
mode_list.append(int(tmp_dit['transport_mode']))
distance_list = np.array(distance_list)
price_list = np.array(price_list)
eta_list = np.array(eta_list)
mode_list = np.array(mode_list, dtype='int')
mode_list_feasible[i, mode_list] = 1
distance_sort_idx = np.argsort(distance_list)
price_sort_idx = np.argsort(price_list)
eta_sort_idx = np.argsort(eta_list)
max_distance[i] = distance_list[distance_sort_idx[-1]]
min_distance[i] = distance_list[distance_sort_idx[0]]
mean_distance[i] = np.mean(distance_list)
std_distance[i] = np.std(distance_list)
max_price[i] = price_list[price_sort_idx[-1]]
min_price[i] = price_list[price_sort_idx[0]]
mean_price[i] = np.mean(price_list)
std_price[i] = np.std(price_list)
max_eta[i] = eta_list[eta_sort_idx[-1]]
min_eta[i] = eta_list[eta_sort_idx[0]]
mean_eta[i] = np.mean(eta_list)
std_eta[i] = np.std(eta_list)
first_mode[i] = mode_list[0]
mode_max_distance[i] = mode_list[distance_sort_idx[-1]]
mode_min_distance[i] = mode_list[distance_sort_idx[0]]
mode_max_price[i] = mode_list[price_sort_idx[-1]]
mode_min_price[i] = mode_list[price_sort_idx[0]]
mode_max_eta[i] = mode_list[eta_sort_idx[-1]]
mode_min_eta[i] = mode_list[eta_sort_idx[0]]
feature_data = pd.DataFrame(mode_list_feasible)
feature_data.columns = ['mode_feasible_{}'.format(i) for i in range(12)]
feature_data['max_distance'] = max_distance
feature_data['min_distance'] = min_distance
feature_data['mean_distance'] = mean_distance
feature_data['std_distance'] = std_distance
feature_data['max_price'] = max_price
feature_data['min_price'] = min_price
feature_data['mean_price'] = mean_price
feature_data['std_price'] = std_price
feature_data['max_eta'] = max_eta
feature_data['min_eta'] = min_eta
feature_data['mean_eta'] = mean_eta
feature_data['std_eta'] = std_eta
feature_data['mode_max_distance'] = mode_max_distance
feature_data['mode_min_distance'] = mode_min_distance
feature_data['mode_max_price'] = mode_max_price
feature_data['mode_min_price'] = mode_min_price
feature_data['mode_max_eta'] = mode_max_eta
feature_data['mode_min_eta'] = mode_min_eta
feature_data['first_mode'] = first_mode
all_data = pd.concat([all_data, feature_data], axis=1)
all_data = all_data.drop(['plans'], axis=1)
return all_data
def read_profile_data():
profile_data = pd.read_csv(profiles_file)
profile_na = np.zeros(67)
profile_na[0] = -1
profile_na = pd.DataFrame(profile_na.reshape(1, -1))
profile_na.columns = profile_data.columns
profile_data = profile_data.append(profile_na)
return profile_data
def generate_profile_features(data):
profile_data = read_profile_data()
x = profile_data.drop(['pid'], axis=1).values
svd = TruncatedSVD(n_components=20, n_iter=20, random_state=42)
svd_x = svd.fit_transform(x)
svd_feas = pd.DataFrame(svd_x)
svd_feas.columns = ['svd_attribute_{}'.format(i) for i in range(20)]
svd_feas['pid'] = profile_data['pid'].values
data['pid'] = data['pid'].fillna(-1)
data = data.merge(svd_feas, on='pid', how='left')
return data
def split_train_test(data):
train_data = data[data['click_mode'] != -1]
test_data = data[data['click_mode'] == -1]
submit = test_data[['sid']].copy()
train_data = train_data.drop(['sid', 'pid'], axis=1)
test_data = test_data.drop(['sid', 'pid'], axis=1)
test_data = test_data.drop(['click_mode'], axis=1)
train_y = train_data['click_mode'].values
train_x = train_data.drop(['click_mode'], axis=1)
return train_x, train_y, test_data, submit
def save_data(trainX, y_train, testX, y_test):
trainX.to_csv('preprocess_data/train.csv',index = False)
testX.to_csv('preprocess_data/test.csv',index = False)
y_train = pd.DataFrame({'click_mode': y_train})
y_train.to_csv('preprocess_data/train_label.csv',index = False)
y_test.to_csv('preprocess_data/test_label.csv',index = False)
def load_data():
trainX = pd.read_csv('preprocess_data/train.csv')
testX = pd.read_csv('preprocess_data/test.csv')
y_train = pd.read_csv('preprocess_data/train_label.csv')
y_test = pd.read_csv('preprocess_data/test_label.csv')
return trainX, y_train, testX, y_test
def build_norm_context(trainX, testX):
trainX = np.array(trainX)
context_input = trainX[:,:37]
user_input = trainX[:,37:]
testX = np.array(testX)
context_input_test = testX[:,:37]
user_input_test = testX[:,37:]
scaler = MinMaxScaler()
scaler.fit(context_input)
# apply transform
normalized_train = scaler.transform(context_input)
normalized_test = scaler.transform(context_input_test)
normalized_train= pd.DataFrame(normalized_train)
user_input= pd.DataFrame(user_input)
merged_train = pd.concat([normalized_train, user_input], axis=1)
normalized_test= pd.DataFrame(normalized_test)
user_input_test= pd.DataFrame(user_input_test)
merged_test = pd.concat([normalized_test, user_input_test], axis=1)
return merged_train, merged_test
def get_prepare_data(train_data, test_data):
all_data = preprocess_features(train_data, test_data)
all_data = generate_plan_features(all_data)
all_data = generate_profile_features(all_data)
train_x, train_y, test_x, submit = split_train_test(all_data)
return train_x, train_y, test_x, submit
if __name__ == '__main__':
pass
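    # Example pipeline (commented out; expects the CSV files listed above in ./dataset/):
    # train_data, test_data = load_prepare_data()
    # train_x, train_y, test_x, submit = get_prepare_data(train_data, test_data)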
|
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Dense
class VGGNetSmall():
def __init__(self, input_shape, num_classes, final_activation):
self.input_shape = input_shape
self.num_classes = num_classes
self.final_activation = final_activation
def build(self):
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=self.input_shape, activation='relu'))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu'))
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(units=1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=self.num_classes, activation=self.final_activation))
        return model
|
def cigar_party(cigars, is_weekend):
return (40<=cigars<=60 or (is_weekend and 40<=cigars))
def date_fashion(you, date):
if you <= 2 or date <= 2:
return 0
if you >= 8 or date >= 8:
return 2
else:
return 1
def squirrel_play(temp, is_summer):
if 60<=temp<=90:
return True
if 60<=temp<=100 and is_summer:
return True
else:
return False
def alarm_clock(day, vacation):
is_weekend = day == 0 or day == 6
if vacation:
if is_weekend:
return "off"
else:
return "10:00"
else:
if is_weekend:
return "10:00"
else:
return "7:00"
def love6(a, b):
if a == 6 or b == 6:
return True
if abs(a-b)==6:
return True
    if a + b == 6:
return True
else:
return False
def in1to10(n, outside_mode):
if not outside_mode:
if 1<= n <= 10:
return True
else:
return False
if outside_mode and (n <= 1 or n>= 10):
return True
else:
return False
def near_ten(num):
if 0 <= (num+2)%10 <= 4:
return True
else:
return False
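# Quick sanity checks (expected values per the usual CodingBat specifications):
#   cigar_party(30, False) -> False    cigar_party(70, True) -> True
#   alarm_clock(0, False)  -> '10:00'  love6(1, 5)           -> True
#   in1to10(11, False)     -> False    near_ten(12)          -> True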
|
# coding=utf-8
from __future__ import unicode_literals
from collections import OrderedDict
from .. import Provider as AddressProvider
class Provider(AddressProvider):
street_suffixes = OrderedDict(
(('utca', 0.75), ('út', 0.1), ('tér', 0.1), ('köz', 0.001), ('körút', 0.001), ('sétány', 0.001),))
street_name_formats = (
'{{frequent_street_name}} {{street_suffix}}',
'{{real_city_name}}i {{street_suffix}}',
'{{city_part}}{{city_suffix}}i {{street_suffix}}',
'{{city_prefix}}{{city_part}}i {{street_suffix}}')
# Currently deprecated.
# secondary_address_formats = ("#.em #.", "##. em. #.")
city_formats = ('{{city_prefix}}{{city_part}}{{city_suffix}}',
'{{city_part}}{{city_suffix}}', '{{real_city_name}}')
street_address_formats = ('{{street_name}} {{building_number}}',)
address_formats = ("{{street_address}}\n{{postcode}} {{city}}",)
frequent_street_names = (
'Ady Endre',
'Dózsa György',
'Petőfi',
'Petőfi Sándor',
'Arany János',
'Béke',
'Szabadság',
'Kossuth',
'József Attila')
# The 'real city name' generator includes a number of real cities of
# Hungary that no generator could feasibly dispense. Please note that the
# post code generator is, at this point, not capable of generating a
# fitting post code. In Hungary, post codes are determined by the county of
# the place (see the county generator), and for this reason, often there
# will be a discrepancy. A patch is in the works - until then, use
# Wikipedia to resolve postcode issues.
#
# This generator was created by collecting the 30 largest Hungarian places
# by population, based on the Hungarian Gazetteer generated with effect as
# of 01 January 2016 (http://www.ksh.hu/docs/hun/hnk/hnk_2016.pdf).
real_city_names = (
'Budapest',
'Debrecen',
'Szeged',
'Miskolc',
'Pécs',
'Győr',
'Nyíregyháza',
'Kecskemét',
'Székesfehérvár',
'Szombathely',
'Szolnok',
'Tatabánya',
'Érd',
'Kaposvár',
'Sopron',
'Veszprém',
'Békéscsaba',
'Zalaegerszeg',
'Eger',
'Nagykanizsa',
'Dunaújváros',
'Hódmezővásárhely',
'Dunakeszi',
'Szigetszentmiklós',
'Cegléd',
'Baja',
'Salgótarján',
'Ózd',
'Vác',
'Mosonmagyaróvár')
city_prefs = (
'kis',
'nagy',
'szent',
'duna',
'tisza',
'alsó',
'felső',
'belső',
'bakony',
'vác',
'mező',
'nyék',
'nyír',
'balaton',
'borsod',
'buda',
'hajdú',
'kun',
'moson',
'pilis',
'új',
'egyházas',
'dráva',
'magyar',
'mátra',
'somogy',
'lajos',
'bács',
'békés',
'puszta',
'orosz',
'rác',
'szerb',
'német',
'török')
city_parts = (
'híd',
'györgy',
'mindszent',
'kereszt',
'márton',
'hát',
'hetven',
'mellék',
'tamási',
'tapolca',
'fürdő',
'liget',
'szék',
'tót',
'')
city_suffixes = (
'háza',
'németi',
'devecser',
'fa',
'nádasd',
'apáti',
'falu',
'falva',
'vég',
'vár',
'vára',
'várad',
'hida',
'kövesd',
'bánya',
'halas',
'berény',
'kőrös',
'haraszti',
'város')
counties = (
'Bács-Kiskun',
'Baranya',
'Békés',
'Borsod-Abaúj-Zemplén',
'Csongrád',
'Fejér',
'Győr-Moson-Sopron',
'Hajdú-Bihar',
'Heves',
'Jász-Nagykun-Szolnok',
'Komárom-Esztergom',
'Nógrád',
'Pest',
'Somogy',
'Szabolcs-Szatmár-Bereg',
'Tolna',
'Vas',
'Veszprém',
'Zala')
countries = (
"Afganisztán", "Aland-szigetek", "Albánia", "Algéria", "Amerikai Szamoa", "Amerikai Virgin-szigetek", "Andorra",
"Angola", "Anguilla", "Antarktisz", "Antigua és Barbuda", "Apostoli Szentszék", "Argentína", "Aruba",
"Ausztrália", "Ausztria", "Amerikai Egyesült Államok Külső Szigetei", "Azerbajdzsán", "Bahama-szigetek",
"Bahrein", "Banglades", "Barbados", "Fehéroroszország", "Belgium", "Belize", "Benin", "Bermuda", "Bhután",
"Bissa -Guinea", "Bolívia", "Bosznia-Hercegovina", "Botswana", "Bouvet-sziget", "Brazília",
"Brit Indiai-óceáni Terület", "Brit Virgin - szigetek", "Brunei", "Bulgária", "Burkina Faso", "Burundi",
"Chile", "Ciprus", "Comore-szigetek", "Cook-szigetek", "Costa Rica", "Csád", "Csehország", "Dánia",
"Dél-Afrika", "Dél-Korea", "Dominika", "Dominikai Köztársaság", "Dzsibuti", "Ecuador", "Egyenlítői-Guinea",
"Egyesült Államok", "Egyesült Arab Emírségek", "Egyesült Királyság", "Egyiptom", "Elefántcsontpart", "Eritrea",
"Északi Mariana-szigetek", "Észak-Korea", "Észtország", "Etiópia", "Falkland-szigetek", "Feröer szigetek",
"Fidzsi-szigetek", "Finnország", "Francia Déli Területek", "Francia Guyana", "Francia Polinézia",
"Franciaország", "Fülöp-szigetek", "Gabon", "Gambia", "Ghána", "Gibraltár", "Görögország", "Grenada",
"Grönland", "Grúzia", "Guadeloupe", "Guam", "Guatemala", "Guinea", "Guyana", "Haiti", "Holland Antillák",
"Hollandia", "Honduras", "Hongkong", "Horvátország", "India", "Indonézia", "Irak", "Irán", "Írország", "Izland",
"Izrael", "Jamaica", "Japán", "Jemen", "Jordánia", "Kajmán-szigetek", "Kambodzsa", "Kamerun", "Kanada",
"Karácsony-sziget", "Katar", "Kazahsztán", "Kelet-Timor", "Kenya", "Kína", "Kirgizisztán", "Kiribati",
"Keeling-szigetek", "Kolumbia", "Kongó", "Kongói Demokratikus Köztársaság", "Közép-afrikai Köztársaság", "Kuba",
"Kuvait", "Laosz", "Lengyelország", "Lesotho", "Lettország", "Libanon", "Libéria", "Líbia", "Liechtenstein",
"Litvánia", "Luxemburg", "Macedónia", "Madagaszkár", "Magyarország", "Makaó", "Malajzia", "Malawi",
"Maldív-szigetek", "Mali", "Málta", "Marokkó", "Marshall-szigetek", "Martinique", "Mauritánia", "Mauritius",
"Mayotte", "Mexikó", "Mianmar", "Mikronézia", "Moldova", "Monaco", "Mongólia", "Montenegró", "Montserrat",
"Mozambik", "Namíbia", "Nauru", "Németország", "Nepál", "Nicaragua", "Niger", "Nigéria", "Niue",
"Norfolk-sziget", "Norvégia", "Nyugat-Szahara", "Olaszország", "Omán", "Oroszország", "Örményország",
"Pakisztán", "Palau", "Panama", "Pápua", "Új-Guinea", "Paraguay", "Peru", "Pitcairn-szigetek", "Portugália",
"Puerto Rico", "Réunion", "Románia", "Ruanda", "Saint Kitts és Nevis", "Saint Lucia",
"Saint-Pierre és Miquelon", "Saint Vincent és Grenadine-szigetek", "Salamon-szigetek", "Salvador", "San Marino",
"São Tomé és Príncipe", "Seychelle-szigetek", "Sierra Leone", "Spanyolország", "Srí Lanka", "Suriname", "Svájc",
"Svalbard szigetek", "Svédország", "Szamoa", "Szaúdi-Arábia", "Szenegál", "Szent Ilona", "Szerbia", "Szingapúr",
"Szíria", "Szlovákia", "Szlovénia", "Szomália", "Szudán", "Szváziföld", "Tádzsikisztán", "Tajvan", "Tanzánia",
"Thaiföld", "Togo", "Tokelau-szigetek", "Tonga", "Törökország", "Trinidad és Tobago", "Tunézia",
"Turks- és Caicos-szigetek", "Tuvalu", "Türkmenisztán", "Uganda", "Új-Kaledónia", "Új-Zéland", "Ukrajna",
"Uruguay", "Üzbegisztán", "Vanuatu", "Venezuela", "Vietnam", "Wallis és Futuna", "Zambia", "Zimbabwe",
"Zöld-foki szigetek",)
def county(self):
return self.random_element(self.counties)
def street_address_with_county(self):
return "{street_address}\n{county} megye\n{postcode} {city}".format(
street_address=self.street_address(),
county=self.county(),
postcode=self.postcode(),
city=self.city().capitalize())
def city_prefix(self):
return self.random_element(self.city_prefs)
def city_part(self):
return self.random_element(self.city_parts)
def real_city_name(self):
return self.random_element(self.real_city_names)
def frequent_street_name(self):
return self.random_element(self.frequent_street_names)
def postcode(self):
return "H-{}{}{}{}".format(
super(
Provider, self).random_digit_not_null(), super(
Provider, self).random_digit(), super(
Provider, self).random_digit(), super(
Provider, self).random_digit())
def street_name(self):
return super(Provider, self).street_name().capitalize()
def building_number(self):
numeric_part = super(Provider, self).random_int(1, 250)
return str(numeric_part) + "."
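# Usage sketch (assumes the faker package with this provider wired up for the
# Hungarian locale, commonly 'hu_HU'):
# from faker import Faker
# fake = Faker('hu_HU')
# print(fake.street_address_with_county())  # street, "<county> megye" and an H-#### postcode
# print(fake.building_number())             # e.g. '123.'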
|
number = int(input("Enter number: "))
if (number >= 100 and number <= 200) or number == 0:
pass
else:
print("invalid") |
#!/usr/bin/env python3
"""
File: isc_clause_acl.py
Clause: acl
Title: Clause statement for the Access Control List
Description: Provides clause-specific aspect of ACL-related grammar
in PyParsing engine for ISC-configuration style.
Reason for separate file from isc_acl is to avoid the Python
'import' circular dependency of 'isc_aml'.
"""
from pyparsing import Group, ZeroOrMore, Literal, Word, alphanums, Keyword
from bind9_parser.isc_utils import acl_name
from bind9_parser.isc_aml import aml_nesting
#############################################################
# ACL clause
# The following ACL names are built-in:
#
# * any - Matches all hosts.
# * none - Matches no hosts.
# * localhost - Matches the IPv4 and IPv6 addresses of all
# network interfaces on the system. When
# addresses are added or removed, the
# localhost ACL element is updated to reflect
# the changes.
# * localnets - Matches any host on an IPv4 or IPv6 network
# for which the system has an interface. When
# addresses are added or removed, the
# localnets ACL element is updated to reflect
# the changes. Some systems do not provide a
# way to determine the prefix lengths of
# local IPv6 addresses. In such a case,
# localnets only matches the local IPv6
# addresses, just like localhost
#############################################################
# acl acl-name {
# [ address_match_nosemicolon | any | all ];
# };
clause_stmt_acl_standalone = (
Keyword('acl').suppress()
- Group( # Best thing I've ever done.
acl_name #(Word(alphanums + '_-'))('acl_name')
- (
ZeroOrMore(
Group(
aml_nesting('') # peel away testing label here
)('') # ('aml_series3')
)('')
)('aml_series')
)('')
)('acl')
# Syntax:
# acl a { b; }; acl c { d; e; f; }; acl g { ! h; ! { i; }; };
#
clause_stmt_acl_series = ZeroOrMore(
(
clause_stmt_acl_standalone
)
)('acl')
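# Usage sketch, following the syntax shown in the comment above (assumes pyparsing
# and bind9_parser are importable; kept commented out to avoid import-time work):
# result = clause_stmt_acl_series.parseString('acl a { b; }; acl c { d; e; f; };')
# print(result.asDict())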
|
from __future__ import division, print_function, absolute_import
import numpy as np
from ..math import gauss
from .ideal import aideal, daideal_drho, d2aideal_drho
from .association_aux import association_config
from .polarGV import aij, bij, cij
from .monomer_aux import I_lam, J_lam
from .a1sB_monomer import x0lambda_eval
from .ares import ares, dares_drho, d2ares_drho
from .density_solver import density_topliss, density_newton
from .psat_saft import psat
from .tsat_saft import tsat
from .critical_pure import get_critical
from ..constants import kb, Na
R = Na * kb
def U_mie(r, c, eps, lambda_r, lambda_a):
u = c * eps * (r**lambda_r - r**lambda_a)
return u
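# Worked check of the reduced Mie potential above (illustrative reduced units):
# at r = 1 (i.e. r = sigma) the factor r**lambda_r - r**lambda_a vanishes, so
# U_mie(1.0, 1.4, 1.0, 12, 6) == 0.0, the expected zero crossing of a
# Mie/Lennard-Jones-type potential at the size parameter.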
phi16 = np.array([[7.5365557, -37.60463, 71.745953, -46.83552, -2.467982,
-0.50272, 8.0956883],
[-359.44, 1825.6, -3168.0, 1884.2, -0.82376, -3.1935, 3.7090],
[1550.9, -5070.1, 6534.6, -3288.7, -2.7171, 2.0883, 0],
[-1.19932, 9.063632, -17.9482, 11.34027, 20.52142, -56.6377,
40.53683],
[-1911.28, 21390.175, -51320.7, 37064.54, 1103.742, -3264.61,
2556.181],
[9236.9, -129430., 357230., -315530., 1390.2, -4518.2,
4241.6]])
nfi = np.arange(0, 7)
nfi_num = nfi[:4]
nfi_den = nfi[4:]
# Equation 20
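# f_i(alpha) = sum_{n=0..3} phi_{i,n} * alpha**n
#              / (1 + sum_{n=4..6} phi_{i,n} * alpha**(n - 3)),
# with the coefficients phi_{i,n} taken from the rows of the phi16 table above.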
def fi(alpha, i):
phi = phi16[i-1]
num = np.dot(phi[nfi_num], np.power(alpha, nfi_num))
den = 1 + np.dot(phi[nfi_den], np.power(alpha, nfi_den - 3))
return num/den
class saftvrmie_pure():
'''
Pure component SAFT-VR-Mie EoS Object
    This object has implemented methods for phase equilibrium
    as well as for interfacial property calculations.
Parameters
----------
pure : object
pure component created with component class
Attributes
----------
ms: number of chain segments
sigma: size parameter of Mie potential [m]
eps: well-depth of Mie potential [J]
lambda_a: attractive exponent for Mie potential
lambda_r: repulsive exponent for Mie potential
ring: geometric parameter for ring molecules
(see Langmuir 2017, 33, 11518-11529, Table I.)
eABij: association energy [J]
rcij: association range [m]
rdij: association site position [m]
sites: triplet of number of association sites [B, P, N]
mupol: dipolar moment [Debye]
npol: number of dipolar sites
cii : influence factor for SGT [J m^5 / mol^2]
Methods
-------
cii_correlation : correlates the influence parameter of the fluid
diameter : computes the diameter at given temperature
temperature_aux : computes temperature dependent parameters of the fluid
density : computes the density of the fluid
psat : computes saturation pressure
tsat : computes saturation temperature
    get_critical : attempts to compute the critical point of the fluid
    afcn: computes total Helmholtz energy
    dafcn_drho : computes total Helmholtz energy and its density derivative
    d2afcn_drho : computes total Helmholtz energy and its density derivatives
pressure : computes the pressure
dP_drho : computes pressure and its density derivative
logfug : computes the fugacity coefficient
    a0ad : computes dimensionless Helmholtz density energy
    muad : computes dimensionless chemical potential
    dOm : computes dimensionless Thermodynamic Grand Potential
    ci : computes influence parameters matrix for SGT
    sgt_adim : computes dimensionless factors for SGT
EntropyR : computes the residual entropy of the fluid
EnthalpyR : computes the residual enthalpy of the fluid
CvR : computes the residual isochoric heat capacity
CpR : computes the residual heat capacity
speed_sound : computes the speed of sound
Auxiliar methods (computed using temperature_aux output list)
-------------------------------------------------------------
density_aux : computes density
afcn_aux : computes afcn
dafcn_aux : computes dafcn_drho
d2afcn_aux : computes d2afcn_drho
pressure_aux : computes pressure
dP_drho_aux : computes dP_drho
logfug_aux : computes logfug
a0ad_aux : compute a0ad
muad_aux : computes muad
dOm_aux : computes dOm
'''
def __init__(self, pure):
self.pure = pure
self.Mw = pure.Mw
self.ms = pure.ms
self.sigma = pure.sigma
self.eps = pure.eps
self.ring = pure.ring
self.lambda_a = pure.lambda_a
self.lambda_r = pure.lambda_r
self.lambda_ar = self.lambda_r + self.lambda_a
dif_c = self.lambda_r - self.lambda_a
expc = self.lambda_a/dif_c
self.c = self.lambda_r/dif_c*(self.lambda_r/self.lambda_a)**expc
self.c2 = self.c**2
alpha = self.c*(1/(self.lambda_a - 3) - 1/(self.lambda_r - 3))
self.alpha = alpha
self.lambdas = self.lambda_a, self.lambda_r, self.lambda_ar
self.sigma3 = pure.sigma**3
self.cte_a2m = 0.5*self.eps*self.c2
self.eps3 = self.eps**3
self.f1 = fi(alpha, 1)
self.f2 = fi(alpha, 2)
self.f3 = fi(alpha, 3)
self.f4 = fi(alpha, 4)
self.f5 = fi(alpha, 5)
self.f6 = fi(alpha, 6)
roots, weights = gauss(100)
self.roots = roots
self.weights = weights
self.umie = U_mie(1./roots, self.c, self.eps, self.lambda_r,
self.lambda_a)
c_matrix = np.array([[0.81096, 1.7888, -37.578, 92.284],
[1.0205, -19.341, 151.26, -463.5],
[-1.9057, 22.845, -228.14, 973.92],
[1.0885, -6.1962, 106.98, -677.64]])
lam_exp = np.array([0, -1, -2, -3])
self.cctes_lr = np.matmul(c_matrix, self.lambda_r**lam_exp)
self.cctes_la = np.matmul(c_matrix, self.lambda_a**lam_exp)
self.cctes_lar = np.matmul(c_matrix, self.lambda_ar**lam_exp)
self.cctes_2lr = np.matmul(c_matrix, (2*self.lambda_r)**lam_exp)
self.cctes_2la = np.matmul(c_matrix, (2*self.lambda_a)**lam_exp)
self.cctes = (self.cctes_la, self.cctes_lr,
self.cctes_2la, self.cctes_2lr, self.cctes_lar)
# association configuration
self.eABij = pure.eAB
self.rcij = pure.rcAB
self.rdij = pure.rdAB
self.rcij2 = self.rcij**2
self.rcij3 = self.rcij**3
self.rdij2 = self.rdij**2
self.rdij3 = self.rdij**3
self.sites = pure.sites
S, DIJ, indexabij, nsites, diagasso = association_config(self)
assoc_bool = nsites != 0
self.assoc_bool = assoc_bool
if assoc_bool:
self.S = S
self.DIJ = DIJ
self.indexabij = indexabij
self.nsites = nsites
self.diagasso = diagasso
# Polar Contribution
self.mupol = pure.mupol
self.npol = pure.npol
polar_bool = self.npol != 0
self.polar_bool = polar_bool
if polar_bool:
mpol = self.ms * (self.ms < 2) + 2 * (self.ms >= 2)
self.mpol = mpol
aux1 = np.array([1, (mpol-1)/mpol, (mpol-1)/mpol*(mpol-2)/mpol])
self.anij = aij@aux1
self.bnij = bij@aux1
self.cnijk = cij@aux1
# 1 D = 3.33564e-30 C * m
# 1 C^2 = 9e9 N m^2
cte = (3.33564e-30)**2 * (9e9)
self.mupolad2 = self.mupol**2*cte/(self.ms*self.eps*self.sigma3)
# For SGT Computations
self.cii = np.array(pure.cii, ndmin=1)
# computing critical point
self.critical = False
out = get_critical(self, None, None, method='hybr', full_output=True)
if out.success:
self.critical = True
self.Tc = out.Tc
self.Pc = out.Pc
self.rhoc = out.rhoc
def cii_correlation(self, overwrite=False):
"""
        cii_correlation()
        Method that computes the influence parameter of coarse-grained
molecules
AIChE Journal, 62, 5, 1781-1794 (2016)
Eq. (23)
Parameters
----------
overwrite : bool
If true it will overwrite the actual influence parameter.
Returns
-------
cii : float
correlated influence parameter [J m^5 / mol^2]
"""
cii = self.ms * (0.12008072630855947 + 2.2197907527439655 * self.alpha)
cii *= np.sqrt(Na**2 * self.eps * self.sigma**5)
cii **= 2
cii = np.array([cii], ndmin=1)
if overwrite:
self.cii = cii
return cii
def diameter(self, beta):
"""
d(beta)
Method that computes the diameter of the fluid at given
beta = 1 / kb T
Journal of Chemical Physics, 139(15), 1–37 (2013)
Eq. (7)
Parameters
----------
beta : float
Boltzmann's factor: beta = 1 / kb T [1/J]
Returns
-------
d : float
computed diameter [m]
"""
integrer = np.exp(-beta * self.umie)
d = self.sigma * (1. - np.dot(integrer, self.weights))
return d
def eta_sigma(self, rho):
"""
eta_sigma(rho)
Method that computes packing fraction of the fluid at diameter=sigma.
Parameters
----------
rho : float
molecular density [molecules/m^3]
Returns
-------
eta : float
packing fraction [Adim]
"""
return self.ms * rho * np.pi * self.sigma3 / 6
def eta_bh(self, rho, dia3):
"""
eta_sigma(rho, d)
Method that computes packing fraction of the fluid at given diameter.
Parameters
----------
rho : float
molecular density [molecules/m^3]
d : float
diameter [m]
Returns
-------
eta : float
packing fraction [Adim]
deta : float
derivative of packing fraction respect to density [m^3]
"""
deta_drho = self.ms * np.pi * dia3 / 6
eta = deta_drho * rho
return eta, deta_drho
def temperature_aux(self, T):
"""
temperature_aux(T)
Method that computes temperature dependent parameters.
It returns the following list:
temp_aux = [beta, dia, tetha, x0, x03, Fab, epsa]
Journal of Chemical Physics, 139(15), 1–37 (2013)
beta: Boltzmann's factor [1/J]
dia: computed diameter [m] (Eq 7)
tetha: exp(beta*eps)-1 [Adim] (Below Eq. 63)
x0: sigma/dia [Adim] (Below Eq. 17)
x03: x0^3 [Adim]
Fab: association strength [Adim] (Below Eq. 77)
epsa: eps / kb / T [Adim]
Parameters
----------
T : float
Absolute temperature [K]
Returns
-------
temp_aux : list
list of computed parameters
"""
beta = 1 / (kb*T)
beta2 = beta**2
beta3 = beta2*beta
dia = self.diameter(beta)
dia3 = dia**3
x0 = self.sigma/dia
x03 = x0**3
        # Parameters needed for evaluating the Helmholtz contributions
la, lr, lar = self.lambda_a, self.lambda_r, self.lambda_ar
out = x0lambda_eval(x0, la, lr, lar)
x0_a1, x0_a2, x0_a12, x0_a22 = out
I_la = I_lam(x0, la)
I_lr = I_lam(x0, lr)
I_2la = I_lam(x0, 2*la)
I_2lr = I_lam(x0, 2*lr)
I_lar = I_lam(x0, lar)
I_lambdas = (I_la, I_lr, I_2la, I_2lr, I_lar)
J_la = J_lam(x0, la)
J_lr = J_lam(x0, lr)
J_2la = J_lam(x0, 2*la)
J_2lr = J_lam(x0, 2*lr)
J_lar = J_lam(x0, lar)
J_lambdas = (J_la, J_lr, J_2la, J_2lr, J_lar)
# for chain contribution
beps = beta*self.eps
beps2 = beps**2
tetha = np.exp(beps)-1
x0_vector = np.array([1, x0, x0**2, x0**3])
cte_g1s = 1/(2*np.pi*self.eps*self.ms*dia3)
cte_g2s = cte_g1s / self.eps
# For Association
Fab = np.exp(beta * self.eABij) - 1
rc, rc2, rc3 = self.rcij, self.rcij2, self.rcij3
rd, rd2, rd3 = self.rdij, self.rdij2, self.rdij3
dia2 = dia**2
Kab = np.log((rc + 2*rd)/dia)
Kab *= 6*rc3 + 18 * rc2*rd - 24 * rd3
aux1 = (rc + 2 * rd - dia)
aux2 = (22*rd2 - 5*rc*rd - 7*rd*dia - 8*rc2 + rc*dia + dia2)
Kab += aux1 * aux2
Kab /= (72*rd2 * self.sigma3)
Kab *= 4 * np.pi * dia2
# For polar
epsa = self.eps / T / kb
temp_aux = [beta, beta2, beta3, dia, dia3, x0, x03, x0_a1, x0_a2,
x0_a12, x0_a22, I_lambdas, J_lambdas, beps, beps2, tetha,
x0_vector, cte_g1s, cte_g2s, Fab, Kab, epsa]
return temp_aux
def density_aux(self, temp_aux, P, state, rho0=None, Xass0=None):
"""
density_aux(T, temp_aux, state, rho0, Xass0)
Method that computes the density of the fluid at T, P
Parameters
----------
temp_aux : list
            temperature dependent parameters computed with temperature_aux(T)
P : float
pressure [Pa]
state : string
'L' for liquid phase and 'V' for vapor phase
rho0 : float, optional
initial guess to compute density root [mol/m^3]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
density: float
density [mol/m^3]
Xass : array
computed fraction of nonbonded sites
"""
if rho0 is None:
rho, Xass = density_topliss(state, temp_aux, P, Xass0, self)
else:
rho, Xass = density_newton(rho0, temp_aux, P, Xass0, self)
return rho, Xass
def density(self, T, P, state, rho0=None, Xass0=None):
"""
density(T, P, state)
Method that computes the density of the fluid at T, P
Parameters
----------
T : float
absolute temperature [K]
P : float
pressure [Pa]
state : string
'L' for liquid phase and 'V' for vapor phase
rho0 : float, optional
initial guess to compute density root [mol/m^3]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
density: float
density [mol/m^3]
"""
temp_aux = self.temperature_aux(T)
rho, Xass = self.density_aux(temp_aux, P, state, rho0, Xass0)
return rho
def psat(self, T, P0=None, v0=[None, None], Xass0=[None, None],
full_output=False):
"""
psat(T, P0)
Method that computes saturation pressure at fixed T
Parameters
----------
T : float
absolute temperature [K]
P0 : float, optional
initial value to find saturation pressure [Pa]
v0: list, optional
initial guess for liquid and vapor phase, respectively [m^3/mol]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
full_output: bool, optional
whether to outputs or not all the calculation info.
Returns
-------
psat : float
saturation pressure [Pa]
vl : float
liquid saturation volume [m3/mol]
vv : float
vapor saturation volume [m3/mol]
"""
out = psat(self, T, P0, v0, Xass0, full_output)
return out
def tsat(self, P, T0=None, Tbounds=None, v0=[None, None],
Xass0=[None, None], full_output=False):
"""
tsat(P, Tbounds)
Method that computes saturation temperature at given pressure.
Parameters
----------
P : float
absolute pressure [Pa]
T0 : float, optional
Temperature to start iterations [K]
Tbounds : tuple, optional
(Tmin, Tmax) Temperature interval to start iterations [K]
v0: list, optional
initial guess for liquid and vapor phase, respectively [m^3/mol]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
full_output: bool, optional
whether to outputs or not all the calculation info.
Returns
-------
tsat : float
saturation temperature [K]
vl : float
liquid saturation volume [m^3/mol]
vv : float
vapor saturation volume [m^3/mol]
"""
out = tsat(self, P, T0, Tbounds, v0, Xass0, full_output)
return out
def get_critical(self, Tc0=None, rhoc0=None, method='hybr',
full_output=False, overwrite=False):
"""
get_critical(Tc0, rhoc0, method)
Method that solves the critical coordinate of the fluid.
        This method requires good initial guesses for the critical temperature
and density to converge.
Second derivative of pressure against volume is estimated numerically.
Parameters
----------
Tc0 : float, optional
initial guess for critical temperature [K]
rhoc : float, optional
initial guess for critical density [mol/m^3]
method : string, optional
SciPy; root method to solve critical coordinate
full_output: bool, optional
whether to outputs or not all the calculation info
overwrite: bool, optional
            whether to overwrite already computed critical points
Returns
-------
Tc: float
Critical temperature [K]
Pc: float
Critical pressure [Pa]
rhoc: float
Critical density [mol/m3]
"""
out = get_critical(self, Tc0, rhoc0, method, full_output)
if overwrite:
if full_output:
if out.success:
self.critical = True
self.Tc = out.Tc
self.Pc = out.Pc
self.rhoc = out.rhoc
else:
Tc0 = out[0]
rhoc0 = out[2]
out2 = get_critical(self, Tc0, rhoc0, method, full_output=True)
if out2.success:
self.critical = True
self.Tc = out2.Tc
self.Pc = out2.Pc
self.rhoc = out2.rhoc
return out
def ares(self, rho, T, Xass0=None):
"""
ares(x, rho, T, Xass0)
Method that computes the residual Helmholtz free energy of the fluid.
Parameters
----------
rho: float
molecular density [molecules/m3]
T: float
absolute temperature [K]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
a: float
            residual dimensionless Helmholtz free energy [Adim]
"""
temp_aux = self.temperature_aux(T)
a, Xass = ares(self, rho, temp_aux, Xass0)
return a, Xass
def dares_drho(self, rho, T, Xass0=None):
"""
dares_drho(rho, T, Xass0)
Method that computes the residual Helmholtz free energy of the fluid
and its first density derivative.
Parameters
----------
rho: float
molecular density [molecules/m3]
T: float
absolute temperature [K]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
a: array_like
            residual dimensionless Helmholtz free energy [Adim, m^3]
"""
temp_aux = self.temperature_aux(T)
a, Xass = dares_drho(self, rho, temp_aux, Xass0)
return a, Xass
def d2ares_drho(self, rho, T, Xass0=None):
"""
d2ares_drho(rho, T, Xass0)
Method that computes the residual Helmholtz free energy of the fluid
and its first and second density derivatives.
Parameters
----------
rho: float
molecular density [molecules/m3]
T: float
absolute temperature [K]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
a: array_like
            residual dimensionless Helmholtz free energy [Adim, m^3, m^6]
"""
temp_aux = self.temperature_aux(T)
a, Xass = d2ares_drho(self, rho, temp_aux, Xass0)
return a, Xass
def afcn_aux(self, rho, temp_aux, Xass0=None):
"""
afcn_aux(rho, temp_aux, Xass0)
Method that computes the total Helmholtz free energy of the fluid.
Parameters
----------
rho: float
molecular density [molecules/m3]
temp_aux : list
            temperature dependent parameters computed with temperature_aux(T)
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
a: float
Helmholtz free energy [J/mol]
Xass : array
computed fraction of nonbonded sites
"""
beta = temp_aux[0]
a, Xass = ares(self, rho, temp_aux, Xass0)
a += aideal(rho, beta)
a *= (Na/beta)
return a, Xass
def dafcn_aux(self, rho, temp_aux, Xass0=None):
"""
dafcn_aux(rho, temp_aux, Xass0)
Method that computes the total Helmholtz free energy of the fluid and
its first density derivative.
Parameters
----------
rho: float
density [mol/m3]
temp_aux : list
            temperature dependent parameters computed with temperature_aux(T)
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
a: array
Helmholtz free energy and its derivative [J/mol, J m^3/mol]
Xass : array
computed fraction of nonbonded sites
"""
beta = temp_aux[0]
a, Xass = dares_drho(self, rho, temp_aux, Xass0)
a += daideal_drho(rho, beta)
a *= (Na/beta)
return a, Xass
def d2afcn_aux(self, rho, temp_aux, Xass0=None):
"""
d2afcn_aux(rho, temp_aux, Xass0)
Method that computes the total Helmholtz free energy of the fluid and
        its first and second density derivatives.
Parameters
----------
rho: float
molecular density [molecules/m3]
temp_aux : list
            temperature dependent parameters computed with temperature_aux(T)
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
a: array
Helmholtz free energy and its derivatives: a, da, d2a
[J/mol, J m^3/mol^2, J m^6/mol^3]
Xass : array
computed fraction of nonbonded sites
"""
beta = temp_aux[0]
a, Xass = d2ares_drho(self, rho, temp_aux, Xass0)
a += d2aideal_drho(rho, beta)
a *= (Na/beta)
return a, Xass
def afcn(self, rho, T, Xass0=None):
"""
afcn(rho, T, Xass0)
Method that computes the total Helmholtz free energy of the fluid.
Parameters
----------
rho: float
molecular density [molecules/m3]
T : float
absolute temperature [K]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
a: float
Helmholtz free energy [J/mol]
"""
temp_aux = self.temperature_aux(T)
a, Xass = self.afcn_aux(rho, temp_aux, Xass0)
return a
def dafcn_drho(self, rho, T, Xass0=None):
"""
dafcn_drho(rho, T, Xass0)
Method that computes the total Helmholtz free energy of the fluid and
its first density derivative.
Parameters
----------
rho: float
molecular density [molecules/m3]
T : float
absolute temperature [K]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
a: array
Helmholtz free energy and its derivative [J/mol, J m^3/mol]
"""
temp_aux = self.temperature_aux(T)
a, Xass = self.dafcn_aux(rho, temp_aux, Xass0)
return a
def d2afcn_drho(self, rho, T, Xass0=None):
"""
d2afcn_drho(rho, T, Xass0)
Method that computes the total Helmholtz free energy of the fluid and
        its first and second density derivatives.
Parameters
----------
rho: float
molecular density [molecules/m3]
T : float
absolute temperature [K]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
a: array
Helmholtz free energy and its derivatives: a, da, d2a
[J/mol, J m^3/mol, J m^6/mol]
"""
temp_aux = self.temperature_aux(T)
a, Xass = self.d2afcn_aux(rho, temp_aux, Xass0)
return a
def pressure_aux(self, rho, temp_aux, Xass0=None):
"""
pressure_aux(rho, temp_aux, Xass0)
Method that computes the pressure at given density [mol/m3] and
temperature [K]
Parameters
----------
rho: float
density [mol/m3]
temp_aux : list
            temperature dependent parameters computed with temperature_aux(T)
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
P : float
pressure [Pa]
Xass : array
computed fraction of nonbonded sites
"""
rhomolecular = Na * rho
da, Xass = self.dafcn_aux(rhomolecular, temp_aux, Xass0)
afcn, dafcn = da
Psaft = rhomolecular**2 * dafcn / Na
return Psaft, Xass
def dP_drho_aux(self, rho, temp_aux, Xass0=None):
"""
dP_drho_aux(rho, temp_aux, Xass0)
Method that computes the pressure and its density derivative at given
density [mol/m3] and temperature [K]
Parameters
----------
rho: float
density [mol/m3]
temp_aux : list
            temperature dependent parameters computed with temperature_aux(T)
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
P : float
pressure [Pa]
dP: float
            derivative of pressure with respect to density [Pa m^3 / mol]
Xass : array
computed fraction of nonbonded sites
"""
rhomolecular = Na * rho
da, Xass = self.d2afcn_aux(rhomolecular, temp_aux, Xass0)
afcn, dafcn, d2afcn = da
Psaft = rhomolecular**2 * dafcn / Na
dPsaft = 2 * rhomolecular * dafcn + rhomolecular**2 * d2afcn
return Psaft, dPsaft, Xass
def pressure(self, rho, T, Xass0=None):
"""
pressure(rho, T, Xass0)
Method that computes the pressure at given density [mol/m3] and
temperature [K]
Parameters
----------
rho: float
density [mol/m3]
T : float
absolute temperature [K]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
P : float
pressure [Pa]
"""
temp_aux = self.temperature_aux(T)
Psaft, Xass = self.pressure_aux(rho, temp_aux, Xass0)
return Psaft
def dP_drho(self, rho, T, Xass0=None):
"""
dP_drho(rho, T, Xass0)
Method that computes the pressure and its density derivative at given
density [mol/m3] and temperature [K]
Parameters
----------
rho: float
density [mol/m3]
T : float
absolute temperature [K]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
P : float
pressure [Pa]
dP: float
            derivative of pressure with respect to density [Pa m^3 / mol]
"""
temp_aux = self.temperature_aux(T)
Psaft, dPsaft, Xass = self.dP_drho_aux(rho, temp_aux, Xass0)
return Psaft, dPsaft
def logfug_aux(self, temp_aux, P, state, v0=None, Xass0=None):
"""
logfug_aux(T, P, state, v0, Xass0)
Method that computes the fugacity coefficient at given
        temperature and pressure.
Parameters
----------
temp_aux : list
            temperature dependent parameters computed with temperature_aux(T)
P : float
pressure [Pa]
state : string
'L' for liquid phase and 'V' for vapour phase
v0: float, optional
initial guess for volume root [m^3/mol]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
logfug : float
fugacity coefficient
v : float
computed volume of the phase [m^3/mol]
Xass : array
computed fraction of nonbonded sites
"""
if v0 is None:
rho, Xass = self.density_aux(temp_aux, P, state, None, Xass0)
else:
rho0 = 1./v0
rho, Xass = self.density_aux(temp_aux, P, state, rho0, Xass0)
v = 1./rho
rhomolecular = Na * rho
ar, Xass = ares(self, rhomolecular, temp_aux, Xass)
beta = temp_aux[0]
RT = Na/beta
Z = P * v / RT
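# pure-component identity: ln(phi) = a_res + (Z - 1) - ln(Z), where a_res is
# assumed to be the residual Helmholtz energy already scaled by RT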
lnphi = ar + (Z - 1.) - np.log(Z)
return lnphi, v, Xass
def logfug(self, T, P, state, v0=None, Xass0=None):
"""
logfug(T, P, state, v0, Xass0)
Method that computes the fugacity coefficient at given temperature
and pressure.
Parameters
----------
T : float
absolute temperature [K]
P : float
pressure [Pa]
state : string
'L' for liquid phase and 'V' for vapour phase
v0: float, optional
initial guess for volume root [m^3/mol]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
logfug: float
fugacity coefficient
v: float
computed volume of the phase [m^3/mol]
"""
temp_aux = self.temperature_aux(T)
lnphi, v, Xass = self.logfug_aux(temp_aux, P, state, v0, Xass0)
return lnphi, v
def ci(self, T):
'''
ci(T)
Method that evaluates the polynomial for the influence parameters used
in the SGT theory for surface tension calculations.
Parameters
----------
T : float
absolute temperature [K]
Returns
-------
ci: float
influence parameter [J m^5 mol^-2]
'''
return np.polyval(self.cii, T)
def sgt_adim(self, T):
'''
sgt_adim(T)
Method that evaluates the adimensional factors for temperature, pressure,
density, surface tension and distance used in interfacial property
computations with SGT.
Parameters
----------
T : float
absolute temperature [K]
Returns
-------
Tfactor : float
factor to obtain dimensionless temperature (K -> K)
Pfactor : float
factor to obtain dimensionless pressure (Pa -> Pa/RT)
rofactor : float
factor to obtain dimensionless density (mol/m3 -> mol/m3)
tenfactor : float
factor to obtain dimensionless surface tension (mN/m)
zfactor : float
factor to obtain dimensionless distance (Angstrom -> m)
'''
cii = self.ci(T) # computing temperature dependent cii
Tfactor = 1.
Pfactor = 1.
rofactor = 1.
tenfactor = np.sqrt(cii) * 1000 # To give tension in mN/m
zfactor = 10**-10
return Tfactor, Pfactor, rofactor, tenfactor, zfactor
def sgt_adim_fit(self, T):
Tfactor = 1
Pfactor = 1
rofactor = 1
tenfactor = 1. * 1000 # To give tension in mN/m
return Tfactor, Pfactor, rofactor, tenfactor
def a0ad_aux(self, rho, temp_aux, Xass0=None):
"""
a0ad_aux(rho, temp_aux, Xass0)
Method that computes the adimensional Helmholtz density energy at
given density and temperature.
Parameters
----------
rho : float
density [mol/m^3]
temp_aux : list
temperature dependent parameters computed with temperature_aux(T)
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
a0ad: float
Helmholtz density energy [J/m^3]
Xass : array
computed fraction of nonbonded sites
"""
rhomolecular = rho * Na
a0, Xass = self.afcn_aux(rhomolecular, temp_aux, Xass0)
a0 *= rho
return a0, Xass
def a0ad(self, rho, T, Xass0=None):
"""
a0ad(rho, T, Xass0)
Method that computes the adimensional Helmholtz density energy at
given density and temperature.
Parameters
----------
rho : float
density [mol/m^3]
T : float
absolute temperature [K]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
a0ad: float
Helmholtz density energy [J/m^3]
"""
temp_aux = self.temperature_aux(T)
a0, Xass = self.a0ad_aux(rho, temp_aux, Xass0)
return a0
def muad_aux(self, rho, temp_aux, Xass0=None):
"""
muad_aux(rho, temp_aux, Xass0)
Method that computes the adimensional chemical potential at given
density and temperature.
Parameters
----------
rho : float
density [mol/m^3]
temp_aux : list
temperature dependent parameters computed with temperature_aux(T)
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
muad: float
chemical potential [J/mol]
Xass : array
computed fraction of nonbonded sites
"""
rhomolecular = rho * Na
da, Xass = self.dafcn_aux(rhomolecular, temp_aux, Xass0)
afcn, dafcn = da
mu = afcn + rhomolecular * dafcn
return mu, Xass
def muad(self, rho, T, Xass0=None):
"""
muad(rho, T, Xass0)
Method that computes the adimensional chemical potential at given
density and temperature.
Parameters
----------
rho : float
density [mol/m^3]
T : float
absolute temperature [K]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
muad: float
chemical potential [J/mol]
"""
temp_aux = self.temperature_aux(T)
mu, Xass = self.muad_aux(rho, temp_aux, Xass0)
return mu
def dOm_aux(self, rho, temp_aux, mu, Psat, Xass0=None):
"""
dOm_aux(rho, temp_aux, mu, Psat, Xass0)
Method that computes the adimensional Thermodynamic Grand potential
at given density and temperature.
Parameters
----------
rho : float
density [mol/m^3]
temp_aux : list
temperature dependent parameters computed with temperature_aux(T)
mu : float
adimensional chemical potential at equilibrium
Psat : float
adimensional pressure [Pa]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
GPT: float
Thermodynamic Grand potential [Pa]
Xass : array
computed fraction of nonbonded sites
"""
a0, Xass = self.a0ad_aux(rho, temp_aux, Xass0)
GPT = a0 - rho*mu + Psat
return GPT, Xass
def dOm(self, rho, T, mu, Psat, Xass0=None):
"""
dOm(rho, T, mu, Psat, Xass0)
Method that computes the adimensional Thermodynamic Grand potential
at given density and temperature.
Parameters
----------
rho : float
density [mol/m^3]
T : float
absolute temperature [K]
mu : float
adimensional chemical potential at equilibrium
Psat : float
adimensional pressure [Pa]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
Returns
-------
GPT: float
Thermodynamic Grand potential [Pa]
"""
temp_aux = self.temperature_aux(T)
GPT, Xass = self.dOm_aux(rho, temp_aux, mu, Psat, Xass0)
return GPT
def EntropyR(self, T, P, state, v0=None, Xass0=None, T_step=0.1):
"""
EntropyR(T, P, state, v0, Xass0, T_step)
Method that computes the residual entropy at given temperature and
pressure.
Parameters
----------
T : float
absolute temperature [K]
P : float
pressure [Pa]
state : string
'L' for liquid phase and 'V' for vapour phase
v0: float, optional
initial guess for volume root [m^3/mol]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
T_step: float, optional
Step to compute the numerical temperature derivatives of Helmholtz
free energy
Returns
-------
Sr : float
residual entropy [J/mol K]
"""
temp_aux = self.temperature_aux(T)
if v0 is None:
rho, Xass = self.density_aux(temp_aux, P, state, None, Xass0)
else:
rho0 = 1./v0
rho, Xass = self.density_aux(temp_aux, P, state, rho0, Xass0)
v = 1./rho
rhomolecular = Na * rho
a, Xass = ares(self, rhomolecular, temp_aux, Xass)
beta = temp_aux[0]
RT = Na/beta
Z = P * v / RT
h = T_step
temp_aux1 = self.temperature_aux(T+h)
temp_aux2 = self.temperature_aux(T+2*h)
temp_aux_1 = self.temperature_aux(T-h)
temp_aux_2 = self.temperature_aux(T-2*h)
a1, Xass1 = ares(self, rhomolecular, temp_aux1, Xass)
a2, Xass2 = ares(self, rhomolecular, temp_aux2, Xass)
a_1, Xass_1 = ares(self, rhomolecular, temp_aux_1, Xass)
a_2, Xass_2 = ares(self, rhomolecular, temp_aux_2, Xass)
F = a
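# fourth-order central difference for dF/dT:
# (F(T-2h) - 8*F(T-h) + 8*F(T+h) - F(T+2h)) / (12*h)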
dFdT = (a_2/12 - 2*a_1/3 + 2*a1/3 - a2/12)/h
Sr_TVN = -T*dFdT - F # residual entropy (TVN) divided by R
Sr_TPN = Sr_TVN + np.log(Z) # residual entropy (TPN) divided by R
Sr_TPN *= R
return Sr_TPN
def EnthalpyR(self, T, P, state, v0=None, Xass0=None, T_step=0.1):
"""
EnthalpyR(T, P, state, v0, Xass0, T_step)
Method that computes the residual enthalpy at given temperature and
pressure.
Parameters
----------
T : float
absolute temperature [K]
P : float
pressure [Pa]
state : string
'L' for liquid phase and 'V' for vapour phase
v0: float, optional
initial guess for volume root [m^3/mol]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
T_step: float, optional
Step to compute the numerical temperature derivatives of Helmholtz
free energy
Returns
-------
Hr : float
residual enthalpy [J/mol]
"""
temp_aux = self.temperature_aux(T)
if v0 is None:
rho, Xass = self.density_aux(temp_aux, P, state, None, Xass0)
else:
rho0 = 1./v0
rho, Xass = self.density_aux(temp_aux, P, state, rho0, Xass0)
v = 1./rho
rhomolecular = Na * rho
a, Xass = ares(self, rhomolecular, temp_aux, Xass)
beta = temp_aux[0]
RT = Na/beta
Z = P * v / RT
h = T_step
temp_aux1 = self.temperature_aux(T+h)
temp_aux2 = self.temperature_aux(T+2*h)
temp_aux_1 = self.temperature_aux(T-h)
temp_aux_2 = self.temperature_aux(T-2*h)
a1, Xass1 = ares(self, rhomolecular, temp_aux1, Xass)
a2, Xass2 = ares(self, rhomolecular, temp_aux2, Xass)
a_1, Xass_1 = ares(self, rhomolecular, temp_aux_1, Xass)
a_2, Xass_2 = ares(self, rhomolecular, temp_aux_2, Xass)
F = a
dFdT = (a_2/12 - 2*a_1/3 + 2*a1/3 - a2/12)/h
Sr_TVN = -T*dFdT - F # residual entropy divided by R
Hr_TPN = F + Sr_TVN + Z - 1. # residual enthalpy divided by RT
Hr_TPN *= RT
return Hr_TPN
def CvR(self, rho, T, Xass0=None, T_step=0.1):
"""
CvR(rho, T, Xass0, T_step)
Method that computes the residual isochoric heat capacity at given
density and temperature.
Parameters
----------
rho : float
density [mol/m^3]
T : float
absolute temperature [K]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
T_step: float, optional
Step to compute temperature numerical derivatives of Helmholtz
free energy
Returns
-------
Cv: float
isochoric heat capacity [J/mol K]
"""
temp_aux = self.temperature_aux(T)
rhomolecular = Na * rho
a, Xass = ares(self, rhomolecular, temp_aux, Xass0)
h = T_step
temp_aux1 = self.temperature_aux(T+h)
temp_aux2 = self.temperature_aux(T+2*h)
temp_aux_1 = self.temperature_aux(T-h)
temp_aux_2 = self.temperature_aux(T-2*h)
a1, Xass1 = ares(self, rhomolecular, temp_aux1, Xass)
a2, Xass2 = ares(self, rhomolecular, temp_aux2, Xass)
a_1, Xass_1 = ares(self, rhomolecular, temp_aux_1, Xass)
a_2, Xass_2 = ares(self, rhomolecular, temp_aux_2, Xass)
dFdT = (a_2/12 - 2*a_1/3 + 2*a1/3 - a2/12)/h
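# fourth-order central difference for d2F/dT2:
# (-F(T-2h) + 16*F(T-h) - 30*F(T) + 16*F(T+h) - F(T+2h)) / (12*h**2)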
d2FdT = (-a_2/12 + 4*a_1/3 - 5*a/2 + 4*a1/3 - a2/12)/h**2
Cvr_TVN = -T**2*d2FdT - 2*T*dFdT # residual isochoric heat capacity
Cvr_TVN *= R
return Cvr_TVN
def CpR(self, T, P, state, v0=None, Xass0=None, T_step=0.1):
"""
CpR(T, P, state, v0, Xass0, T_step)
Method that computes the residual isobaric heat capacity at given temperature
and pressure.
Parameters
----------
T : float
absolute temperature [K]
P : float
pressure [Pa]
state : string
'L' for liquid phase and 'V' for vapour phase
v0: float, optional
initial guess for volume root [m^3/mol]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
T_step: float, optional
Step to compute the numerical temperature derivatives of Helmholtz
free energy
Returns
-------
Cp: float
residual isobaric heat capacity [J/mol K]
"""
temp_aux = self.temperature_aux(T)
if v0 is None:
rho, Xass = self.density_aux(temp_aux, P, state, None, Xass0)
else:
rho0 = 1./v0
rho, Xass = self.density_aux(temp_aux, P, state, rho0, Xass0)
rhomolecular = Na * rho
d2a, Xass = d2ares_drho(self, rhomolecular, temp_aux, Xass)
beta = temp_aux[0]
RT = Na/beta
h = T_step
temp_aux1 = self.temperature_aux(T+h)
temp_aux2 = self.temperature_aux(T+2*h)
temp_aux_1 = self.temperature_aux(T-h)
temp_aux_2 = self.temperature_aux(T-2*h)
a1, Xass1 = dares_drho(self, rhomolecular, temp_aux1, Xass)
a2, Xass2 = dares_drho(self, rhomolecular, temp_aux2, Xass)
a_1, Xass_1 = dares_drho(self, rhomolecular, temp_aux_1, Xass)
a_2, Xass_2 = dares_drho(self, rhomolecular, temp_aux_2, Xass)
a = d2a[:2]
da_drho = a[1] * Na
d2a_drho = d2a[2] * Na**2
dFdT = (a_2/12 - 2*a_1/3 + 2*a1/3 - a2/12)/h
dFdT[1] *= Na
d2FdT = (-a_2/12 + 4*a_1/3 - 5*a/2 + 4*a1/3 - a2/12) / h**2
d2FdT[1] *= Na
dP_dT = RT*(rho**2 * dFdT[1]) + P/T
dP_drho = 2*rho*da_drho + 2.
dP_drho += rho**2 * d2a_drho - 1.
dP_drho *= RT
dP_dV = -rho**2 * dP_drho
# residual isochoric heat capacity
Cvr_TVN = R * (-T**2*d2FdT[0] - 2*T*dFdT[0])
# residual heat capacity
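# Cp_res = Cv_res - R - T*(dP/dT)**2/(dP/dV), from Cp - Cv = -T*(dP/dT)_V**2 / (dP/dV)_T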
Cpr = Cvr_TVN - R - T*dP_dT**2/dP_dV
return Cpr
def speed_sound(self, T, P, state, v0=None, Xass0=None, T_step=0.1,
CvId=3*R/2, CpId=5*R/2):
"""
speed_sound(T, P, state, v0, Xass0, T_step, CvId, CpId)
Method that computes the speed of sound at given temperature
and pressure.
This calculation requires that the molar weight of the fluid has been
set in the component function.
By default the ideal gas Cv and Cp are set to 3R/2 and 5R/2; the user
can supply better values if available.
Parameters
----------
T : float
absolute temperature [K]
P : float
pressure [Pa]
state : string
'L' for liquid phase and 'V' for vapour phase
v0: float, optional
initial guess for volume root [m^3/mol]
Xass0: array, optional
Initial guess for the calculation of fraction of non-bonded sites
T_step: float, optional
Step to compute the numerical temperature derivatives of Helmholtz
free energy
CvId: float, optional
Ideal gas isochoric heat capacity, set to 3R/2 by default [J/mol K]
CpId: float, optional
Ideal gas isobaric heat capacity, set to 5R/2 by default [J/mol K]
Returns
-------
w: float
speed of sound [m/s]
"""
temp_aux = self.temperature_aux(T)
if v0 is None:
rho, Xass = self.density_aux(temp_aux, P, state, None, Xass0)
else:
rho0 = 1./v0
rho, Xass = self.density_aux(temp_aux, P, state, rho0, Xass0)
rhomolecular = Na * rho
d2a, Xass = d2ares_drho(self, rhomolecular, temp_aux, Xass)
beta = temp_aux[0]
RT = Na/beta
h = T_step
temp_aux1 = self.temperature_aux(T+h)
temp_aux2 = self.temperature_aux(T+2*h)
temp_aux_1 = self.temperature_aux(T-h)
temp_aux_2 = self.temperature_aux(T-2*h)
a1, Xass1 = dares_drho(self, rhomolecular, temp_aux1, Xass)
a2, Xass2 = dares_drho(self, rhomolecular, temp_aux2, Xass)
a_1, Xass_1 = dares_drho(self, rhomolecular, temp_aux_1, Xass)
a_2, Xass_2 = dares_drho(self, rhomolecular, temp_aux_2, Xass)
a = d2a[:2]
da_drho = a[1] * Na
d2a_drho = d2a[2] * Na**2
dFdT = (a_2/12 - 2*a_1/3 + 2*a1/3 - a2/12)/h
dFdT[1] *= Na
d2FdT = (-a_2/12 + 4*a_1/3 - 5*a/2 + 4*a1/3 - a2/12) / h**2
d2FdT[1] *= Na
dP_dT = RT*(rho**2 * dFdT[1]) + P/T
dP_drho = 2*rho*da_drho + 2.
dP_drho += rho**2 * d2a_drho - 1.
dP_drho *= RT
dP_dV = -rho**2 * dP_drho
# residual isochoric heat capacity
Cvr_TVN = R * (-T**2*d2FdT[0] - 2*T*dFdT[0])
# residual heat capacity
Cpr = Cvr_TVN - R - T*dP_dT**2/dP_dV
# speed of sound calculation
Cp = CpId + Cpr
Cv = CvId + Cvr_TVN
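# betas is the isentropic compressibility kappa_s = (Cv/Cp)/(rho*dP/drho);
# w = 1/sqrt(rho_mass*kappa_s). The factor 1000 converts Mw (assumed g/mol) to kg/mol.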
betas = -rho * (Cv/Cp) / dP_dV
w2 = 1000./(rho * betas * self.Mw)
w = np.sqrt(w2)
return w
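# Illustrative usage sketch of the property methods above. It assumes `eos` is
# a fully parameterised instance of this class (including Mw for speed_sound);
# the temperature and pressure values are arbitrary example inputs.
def _eos_property_example(eos, T=350.0, P=1.0e5):
    lnphi_v, v_vap = eos.logfug(T, P, 'V')     # vapour-root fugacity coefficient
    lnphi_l, v_liq = eos.logfug(T, P, 'L')     # liquid-root fugacity coefficient
    P_check, dP = eos.dP_drho(1.0/v_liq, T)    # pressure and dP/drho at the liquid density
    Sr = eos.EntropyR(T, P, 'L')               # residual entropy [J/mol K]
    Hr = eos.EnthalpyR(T, P, 'L')              # residual enthalpy [J/mol]
    Cpr = eos.CpR(T, P, 'L')                   # residual isobaric heat capacity [J/mol K]
    w = eos.speed_sound(T, P, 'L')             # speed of sound [m/s]
    return lnphi_v, lnphi_l, v_vap, P_check, dP, Sr, Hr, Cpr, w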
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class ActionRunner(object):
def __init__(self, page, tab, page_test=None):
self._page = page
self._tab = tab
self._page_test = page_test
def RunAction(self, action):
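# WillRunAction is only issued here when the action does not wait after
# running; the page_test hooks bracket the run, and DidRunAction always
# fires through the finally block, even if the action raises.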
if not action.WillWaitAfterRun():
action.WillRunAction(self._page, self._tab)
if self._page_test:
self._page_test.WillRunAction(self._page, self._tab, action)
try:
action.RunActionAndMaybeWait(self._page, self._tab)
finally:
if self._page_test:
self._page_test.DidRunAction(self._page, self._tab, action)
|
'''
Highway layers and multitask modules
Author: [email protected]
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from allennlp.modules.elmo import Elmo, batch_to_ids
import pickle
test_outputs=False
class PositionwiseNN(nn.Module):
def __init__(self, idim, hdim, dropout=None):
super(PositionwiseNN, self).__init__()
self.w_0 = nn.Conv1d(idim, hdim, 1)
self.w_1 = nn.Conv1d(hdim, hdim, 1)
self.dropout = dropout
def forward(self, x):
output = F.relu(self.w_0(x.transpose(1, 2)))
output = self.dropout(output)
output = self.w_1(output)
output = self.dropout(output).transpose(2, 1)
return output
class HighwayLayer(nn.Module):
def __init__(self, dim, target_dim=None, dropout=None, father=None):
super(HighwayLayer,self).__init__()
if target_dim is None:
target_dim = dim
self.linear_transform=False
else:
self.target_dim=target_dim
self.linear_transform=True
self.transform = nn.Conv1d(dim,target_dim, 1)
self.gate = nn.Conv1d(dim, target_dim, 1)
if self.linear_transform:
self.linear=nn.Conv1d(dim, target_dim, 1)
self.dropout = dropout
self.father = [father]
def forward(self,x):
tx=x.transpose(1,2)
gate=F.sigmoid(self.gate(tx))
trans = F.relu(self.transform(tx))
if self.linear_transform:
linear = self.linear(tx)
else:
linear = tx
res=(gate * trans + linear).transpose(2,1)
if self.dropout:
res=self.dropout(res)
if test_outputs:
print('test_outputs=', test_outputs)
gate_cpu=gate.cpu().detach().numpy()
with open('output_gate_{}.pt'.format(self.father[0].name),'wb') as f:
pickle.dump(gate_cpu,f)
print('written:output_gate_{}.pt'.format(self.father[0].name))
return res
class GateLayer(nn.Module):
def __init__(self, dim, target_dim=None, dropout=None):
super(GateLayer,self).__init__()
if target_dim is None:
target_dim = dim
self.linear_transform=False
else:
self.target_dim=target_dim
self.linear_transform=True
self.gate = nn.Conv1d(dim, target_dim, 1)
if self.linear_transform:
self.linear=nn.Conv1d(dim, target_dim, 1)
self.dropout=dropout
def forward(self,x):
tx=x.transpose(1,2)
gate=F.sigmoid(self.gate(tx))
if self.linear_transform:
linear = self.linear(tx)
else:
linear = tx
res = (gate * linear).transpose(2,1)
if self.dropout:
res=self.dropout(res)
return res
class HighwayNetwork(nn.Module):
def __init__(self, dim, target_dim=None, num_layers=1, size_format='shrink_first', dropout=None):
super(HighwayNetwork, self).__init__()
infered_dim = dim if target_dim is None else target_dim
module_list=[]
if size_format =='shrink_first':
module_list.append(HighwayLayer(dim, target_dim, dropout=dropout, father=self))
for i in range(1, num_layers):
module_list.append(HighwayLayer(infered_dim, None, dropout=dropout, father=self))
self.comp=nn.Sequential(*module_list)
elif size_format=="keep_first":
for i in range(0, num_layers-1):
module_list.append(HighwayLayer(dim, None, dropout=dropout))
module_list.append(HighwayLayer(dim, target_dim, dropout=dropout))
self.comp=nn.Sequential(*module_list)
self.dropout=dropout
self.name=None
def forward(self,x):
return self.comp(x)
class GateNetwork(nn.Module):
def __init__(self, dim, target_dim=None, num_layers=1, size_format='shrink_first', dropout=None):
super(GateNetwork, self).__init__()
infered_dim = dim if target_dim is None else target_dim
module_list=[]
if size_format =='shrink_first':
module_list.append(GateLayer(dim, target_dim, dropout=dropout))
for i in range(1, num_layers):
module_list.append(GateLayer(infered_dim, None, dropout=dropout))
self.comp=nn.Sequential(*module_list)
elif size_format=="keep_first":
for i in range(0, num_layers-1):
module_list.append(GateLayer(dim, None, dropout=dropout))
module_list.append(GateLayer(dim, target_dim, dropout=dropout))
self.comp=nn.Sequential(*module_list)
def forward(self,x):
return self.comp(x)
class MultiDatasetWrapper(nn.Module):
def __init__(self, opt):
super(MultiDatasetWrapper, self).__init__()
self.layer_set = {'-1' : None}
self.opt = opt
def add_layer(self, specific_name, layertype, *args, **kwargs):
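# Datasets whose config maps to the same layer id share a single layer
# instance; the reserved id '-1' maps to None, which forward() treats as a
# pass-through (the input is returned unchanged).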
for dataset in self.opt['train_datasets']:
id_layer = self.opt['dataset_configs'][dataset][specific_name]
if id_layer not in self.layer_set:
self.layer_set[id_layer] = layertype(*args, **kwargs)
self.layer_set[id_layer].name=specific_name+'_'+dataset
self.__setattr__(specific_name+'_'+dataset, self.layer_set[id_layer])
def forward(self, specific_name, dataset, *args):
try:
current_setup = self.__getattr__(specific_name+'_'+dataset)
except:
current_setup = self.__getattribute__(specific_name+'_'+dataset)
if current_setup:
return current_setup(*args)
else:
return args[0]
class LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-4):
super(LayerNorm, self).__init__()
self.alpha = Parameter(torch.ones(1,1,hidden_size)) # gain g
self.beta = Parameter(torch.zeros(1,1,hidden_size)) # bias b
self.eps = eps
def forward(self, x):
mu = torch.mean(x, 2, keepdim=True).expand_as(x)
sigma = torch.std(x, 2, keepdim=True).expand_as(x)
return (x - mu) / (sigma + self.eps) * self.alpha.expand_as(x) + self.beta.expand_as(x)
very_small_number=1e-40
class AttnSum(nn.Module):
"""Attention Sum Layer as in Kadlec et. al (2016):
Optionally don't normalize output weights.
"""
def __init__(self, x_size, y_size, identity=False):
super(AttnSum, self).__init__()
if not identity:
self.linear = nn.Linear(y_size, x_size)
else:
self.linear = None
def forward(self, x, y, x_mask, candidate_aggre):
"""
x = batch * len * h1
y = batch * h2
x_mask = batch * len
candidate_aggre = batch * len * c
"""
x_ans_mask = candidate_aggre.sum(dim=2).ge(0).float()
Wy = self.linear(y) if self.linear is not None else y # batch * h1
p = torch.bmm(x,Wy.unsqueeze(2)).squeeze(2) # batch * len
p.data.masked_fill_(x_mask.data, -float('inf'))
pm = F.softmax(p, dim=1) * x_ans_mask # batch * len
unnormalized_probs=torch.bmm(pm.unsqueeze(1), candidate_aggre).squeeze(1) # batch * c
normalized_probs = (unnormalized_probs / unnormalized_probs.sum(dim=1, keepdim=True)) + very_small_number
if self.training:
return torch.log(normalized_probs)
else:
return normalized_probs
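# Illustrative usage sketch: wires a HighwayNetwork over dummy activations.
# The dimensions and dropout rate are arbitrary assumptions for the example.
def _highway_example():
    drop = nn.Dropout(0.1)
    net = HighwayNetwork(dim=300, target_dim=128, num_layers=2,
                         size_format='shrink_first', dropout=drop)
    x = torch.randn(4, 50, 300)    # batch * len * dim
    return net(x).shape            # -> torch.Size([4, 50, 128])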
|
"""
Create a default folder and a file structure for a
python package based on the name of the project.
"""
import os
import sys
import json
import click
def make_skeleton(project_name, template=False):
"""
Create a default structure for a python project.
"""
if template:
# load the structure for the custom template
loaded_template = load_template(template)
else:
# load the structure for the default template
loaded_template = load_template()
for folder in loaded_template.keys(): # make the folders
makedir(folder, project_name)
for files in loaded_template[folder]: # write the files
makefile(files, project_name)
def load_template(template=False):
"""
Load the default or custom template for the python package.
"""
if template:
full_template = template + '.json' # template full name
if os.path.exists(os.path.join(os.getcwd(), full_template)):
# 1 - search for the template in the folder where the command is executed
path = os.path.join(os.getcwd(), full_template)
else:
# 2- search for the template in the default templates folder
path = os.path.join(
os.path.dirname(__file__), 'templates', full_template)
else:
path = os.path.join(
os.path.dirname(__file__), 'templates', 'default_structure.json')
try:
with open(path, 'r') as template:
return json.load(template)
except FileNotFoundError:
click.echo('Template file not found. Aborted!')
sys.exit(1)
def makedir(directory, project_name):
"""
Make the folder tree.
"""
# change the name of base and bin for the name of the project
if (directory == 'base') or (directory == 'bin'):
directory = project_name
try: # write the folders
os.makedirs(directory)
os.chdir(directory)
except FileExistsError:
click.echo('Folder {} already exists. Aborted!'.format(directory))
sys.exit(1)
def makefile(file, project_name):
"""
Write the files for the project.
"""
# change the names of project_name.py and test_project.py
if file == 'project.py':
file = '{}'.format(project_name + '.py')
elif file == 'test_project.py':
file = '{}'.format('test_' + project_name + '.py')
if file == '<--': # go back one directory
os.chdir('..')
else:
try:
with open(file, 'w') as f:
f.write('')
except Exception as e:
click.echo('Error writing {}. Aborted!'.format(file))
sys.exit(1)
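# Illustrative template sketch (the shipped default_structure.json may differ).
# make_skeleton() walks a mapping like this one: every key becomes a folder
# that is created and entered, every list entry becomes an empty file, the
# special entry '<--' steps back up one directory, and the names 'base'/'bin'
# plus 'project.py'/'test_project.py' are replaced with the project name.
# Saved as e.g. templates/my_structure.json (a hypothetical file) it could be
# used via make_skeleton('myproject', template='my_structure').
EXAMPLE_STRUCTURE = {
    "base": ["README.md", "setup.py"],
    "bin": ["__init__.py", "project.py", "test_project.py", "<--"],
    "docs": ["index.md", "<--"]
}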
if __name__ == '__main__':
pass
|
from header_common import *
from ID_animations import *
from header_mission_templates import *
from header_tableau_materials import *
from header_items import *
from module_constants import *
####################################################################################################################
# Each tableau material contains the following fields:
# 1) Tableau id (string): used for referencing tableaux in other files. The prefix tab_ is automatically added before each tableau-id.
# 2) Tableau flags (int). See header_tableau_materials.py for a list of available flags
# 3) Tableau sample material name (string).
# 4) Tableau width (int).
# 5) Tableau height (int).
# 6) Tableau mesh min x (int): divided by 1000 and used when a mesh is auto-generated using the tableau material
# 7) Tableau mesh min y (int): divided by 1000 and used when a mesh is auto-generated using the tableau material
# 8) Tableau mesh max x (int): divided by 1000 and used when a mesh is auto-generated using the tableau material
# 9) Tableau mesh max y (int): divided by 1000 and used when a mesh is auto-generated using the tableau material
# 10) Operations block (list): A list of operations. See header_operations.py for reference.
# The operations block is executed when the tableau is activated.
#
####################################################################################################################
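# Example: in the first entry below, "game_character_sheet" is a 1024 x 1024
# tableau whose auto-generated mesh spans x = 0..0.266 and y = 0..0.532
# (the min/max values 0, 0, 266, 532 divided by 1000).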
#banner height = 200, width = 85 with wood, 75 without wood
tableaus = [
("game_character_sheet", 0, "tableau_with_transparency", 1024, 1024, 0, 0, 266, 532,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 4287137928),
(cur_tableau_set_ambient_light, 10, 11, 15),
(set_fixed_point_multiplier, 100),
(cur_tableau_set_camera_parameters, 0, 40, 40, 0, 100000),
(init_position, 1),
(position_set_z, 1, 100),
(position_set_x, 1, -20),
(position_set_y, 1, -20),
(cur_tableau_add_tableau_mesh, "tableau_troop_character_color", ":script_param_1", 1, 0, 0),
(position_set_z, 1, 200),
(cur_tableau_add_tableau_mesh, "tableau_troop_character_alpha_mask", ":script_param_1", 1, 0, 0),
(position_set_z, 1, 300)
]),
("game_inventory_window", 0, "tableau_with_transparency", 1024, 1024, 0, 0, 180, 270,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 4287137928),
(cur_tableau_set_ambient_light, 10, 11, 15),
(set_fixed_point_multiplier, 100),
(cur_tableau_set_camera_parameters, 0, 40, 40, 0, 100000),
(init_position, 1),
(position_set_z, 1, 100),
(position_set_x, 1, -20),
(position_set_y, 1, -20),
(cur_tableau_add_tableau_mesh, "tableau_troop_inventory_color", ":script_param_1", 1, 0, 0),
(position_set_z, 1, 200),
(cur_tableau_add_tableau_mesh, "tableau_troop_inventory_alpha_mask", ":script_param_1", 1, 0, 0),
(position_set_z, 1, 300)
]),
("game_profile_window", 0, "tableau_with_transparency", 1024, 1024, 0, 0, 320, 480,
[
(store_script_param, ":script_param_1", 1),
(assign, ":var_2", ":script_param_1"),
(val_mod, ":var_2", 2),
(try_begin),
(eq, ":var_2", 0),
(assign, ":value", "trp_multiplayer_profile_troop_male"),
(else_try),
(assign, ":value", "trp_multiplayer_profile_troop_female"),
(try_end),
(troop_set_face_key_from_current_profile, ":value"),
(cur_tableau_set_background_color, 4287137928),
(cur_tableau_set_ambient_light, 10, 11, 15),
(set_fixed_point_multiplier, 100),
(cur_tableau_set_camera_parameters, 0, 40, 40, 0, 100000),
(init_position, 1),
(position_set_z, 1, 100),
(position_set_x, 1, -20),
(position_set_y, 1, -20),
(cur_tableau_add_tableau_mesh, "tableau_troop_profile_color", ":value", 1, 0, 0),
(position_set_z, 1, 200),
(cur_tableau_add_tableau_mesh, "tableau_troop_profile_alpha_mask", ":value", 1, 0, 0)
]),
("game_party_window", 0, "tableau_with_transparency", 1024, 1024, 0, 0, 300, 300,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 4287137928),
(cur_tableau_set_ambient_light, 10, 11, 15),
(set_fixed_point_multiplier, 100),
(cur_tableau_set_camera_parameters, 0, 40, 40, 0, 100000),
(init_position, 1),
(position_set_z, 1, 100),
(position_set_x, 1, -20),
(position_set_y, 1, -20),
(cur_tableau_add_tableau_mesh, "tableau_troop_party_color", ":script_param_1", 1, 0, 0),
(position_set_z, 1, 200),
(cur_tableau_add_tableau_mesh, "tableau_troop_party_alpha_mask", ":script_param_1", 1, 0, 0),
(position_set_z, 1, 300)
]),
("game_troop_label_banner", 0, "tableau_with_transparency", 256, 256, -128, 0, 128, 256,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 4287137928),
(set_fixed_point_multiplier, 100),
(cur_tableau_set_camera_parameters, 0, 100, 100, 0, 100000),
(init_position, 1),
(position_set_y, 1, 120),
(cur_tableau_add_mesh, ":script_param_1", 1, 120, 0)
]),
("round_shield_1", 0, "sample_shield_round_1", 512, 256, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(init_position, 1),
(position_set_x, 1, -50),
(position_set_y, 1, 125),
(cur_tableau_add_mesh, ":script_param_1", 1, 120, 0),
(init_position, 1),
(position_set_z, 1, 10),
(cur_tableau_add_mesh, "mesh_tableau_mesh_shield_round_1", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 100, 0, 100000)
]),
("round_shield_2", 0, "sample_shield_matte", 512, 256, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(init_position, 1),
(position_set_x, 1, -50),
(position_set_y, 1, 120),
(cur_tableau_add_mesh, ":script_param_1", 1, 116, 0),
(init_position, 1),
(position_set_z, 1, 10),
(cur_tableau_add_mesh, "mesh_tableau_mesh_shield_round_2", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 100, 0, 100000)
]),
("round_shield_3", 0, "sample_shield_matte", 512, 256, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(init_position, 1),
(position_set_x, 1, -50),
(position_set_y, 1, 120),
(cur_tableau_add_mesh, ":script_param_1", 1, 116, 0),
(init_position, 1),
(position_set_z, 1, 10),
(cur_tableau_add_mesh, "mesh_tableau_mesh_shield_round_3", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 100, 0, 100000)
]),
("round_shield_4", 0, "sample_shield_matte", 512, 256, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(init_position, 1),
(position_set_x, 1, -50),
(position_set_y, 1, 125),
(cur_tableau_add_mesh, ":script_param_1", 1, 123, 0),
(init_position, 1),
(position_set_z, 1, 10),
(cur_tableau_add_mesh, "mesh_tableau_mesh_shield_round_4", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 100, 0, 100000)
]),
("round_shield_5", 0, "sample_shield_matte", 512, 256, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(init_position, 1),
(position_set_x, 1, -50),
(position_set_y, 1, 125),
(cur_tableau_add_mesh, ":script_param_1", 1, 122, 0),
(init_position, 1),
(position_set_z, 1, 10),
(cur_tableau_add_mesh, "mesh_tableau_mesh_shield_round_5", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 100, 0, 100000)
]),
("small_round_shield_1", 0, "sample_shield_small_round_1", 512, 256, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(init_position, 1),
(position_set_x, 1, -50),
(position_set_y, 1, 130),
(cur_tableau_add_mesh, ":script_param_1", 1, 127, 0),
(init_position, 1),
(position_set_z, 1, 10),
(cur_tableau_add_mesh, "mesh_tableau_mesh_shield_small_round_1", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 100, 0, 100000)
]),
("small_round_shield_2", 0, "sample_shield_small_round_2", 512, 256, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(init_position, 1),
(position_set_x, 1, -50),
(position_set_y, 1, 130),
(cur_tableau_add_mesh, ":script_param_1", 1, 127, 0),
(init_position, 1),
(position_set_z, 1, 10),
(cur_tableau_add_mesh, "mesh_tableau_mesh_shield_small_round_2", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 100, 0, 100000)
]),
("small_round_shield_3", 0, "sample_shield_matte", 512, 256, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(init_position, 1),
(position_set_x, 1, -50),
(position_set_y, 1, 130),
(cur_tableau_add_mesh, ":script_param_1", 1, 127, 0),
(init_position, 1),
(position_set_z, 1, 10),
(cur_tableau_add_mesh, "mesh_tableau_mesh_shield_small_round_3", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 100, 0, 100000)
]),
("kite_shield_1", 0, "sample_shield_matte", 512, 512, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(init_position, 1),
(position_set_x, 1, -60),
(position_set_y, 1, 140),
(cur_tableau_add_mesh, ":script_param_1", 1, 116, 0),
(init_position, 1),
(position_set_z, 1, 10),
(cur_tableau_add_mesh, "mesh_tableau_mesh_shield_kite_1", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 200, 0, 100000)
]),
("kite_shield_2", 0, "sample_shield_matte", 512, 512, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(init_position, 1),
(position_set_x, 1, -57),
(position_set_y, 1, 140),
(cur_tableau_add_mesh, ":script_param_1", 1, 116, 0),
(init_position, 1),
(position_set_z, 1, 10),
(cur_tableau_add_mesh, "mesh_tableau_mesh_shield_kite_2", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 200, 0, 100000)
]),
("kite_shield_3", 0, "sample_shield_matte", 512, 512, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(init_position, 1),
(position_set_x, 1, -57),
(position_set_y, 1, 140),
(cur_tableau_add_mesh, ":script_param_1", 1, 116, 0),
(init_position, 1),
(position_set_z, 1, 10),
(cur_tableau_add_mesh, "mesh_tableau_mesh_shield_kite_3", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 200, 0, 100000)
]),
("kite_shield_4", 0, "sample_shield_matte", 512, 512, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(init_position, 1),
(position_set_x, 1, -50),
(position_set_y, 1, 160),
(cur_tableau_add_mesh, ":script_param_1", 1, 120, 0),
(init_position, 1),
(position_set_z, 1, 10),
(cur_tableau_add_mesh, "mesh_tableau_mesh_shield_kite_4", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 200, 0, 100000)
]),
("heater_shield_1", 0, "sample_shield_matte", 512, 512, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(init_position, 1),
(position_set_x, 1, -60),
(position_set_y, 1, 151),
(cur_tableau_add_mesh, ":script_param_1", 1, 116, 0),
(init_position, 1),
(position_set_z, 1, 10),
(cur_tableau_add_mesh, "mesh_tableau_mesh_shield_heater_1", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 200, 0, 100000)
]),
("heater_shield_2", 0, "sample_shield_matte", 512, 512, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(init_position, 1),
(position_set_x, 1, -50),
(position_set_y, 1, 150),
(cur_tableau_add_mesh, ":script_param_1", 1, 116, 0),
(init_position, 1),
(position_set_z, 1, 10),
(cur_tableau_add_mesh, "mesh_tableau_mesh_shield_heater_2", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 200, 0, 100000)
]),
("pavise_shield_1", 0, "sample_shield_matte", 512, 512, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(init_position, 1),
(position_set_x, 1, -54),
(position_set_y, 1, 120),
(cur_tableau_add_mesh, ":script_param_1", 1, 118, 0),
(init_position, 1),
(position_set_z, 1, 10),
(cur_tableau_add_mesh, "mesh_tableau_mesh_shield_pavise_1", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 200, 0, 100000)
]),
("pavise_shield_2", 0, "sample_shield_matte", 512, 512, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(init_position, 1),
(position_set_x, 1, -54),
(position_set_y, 1, 120),
(cur_tableau_add_mesh, ":script_param_1", 1, 116, 0),
(init_position, 1),
(position_set_z, 1, 10),
(cur_tableau_add_mesh, "mesh_tableau_mesh_shield_pavise_2", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 200, 0, 100000)
]),
("heraldic_armor_a", 0, "sample_heraldic_armor_a", 512, 512, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(store_sub, ":value", ":script_param_1", "mesh_arms_a01"),
(val_add, ":value", 1),
(troop_get_slot, ":banner_background_color_array_value", "trp_banner_background_color_array", ":value"),
(try_begin),
(eq, ":banner_background_color_array_value", 0),
(assign, ":banner_background_color_array_value", 4285690482),
(try_end),
(cur_tableau_set_background_color, ":banner_background_color_array_value"),
(init_position, 1),
(cur_tableau_add_mesh_with_vertex_color, "mesh_heraldic_armor_bg", 1, 200, 100, ":banner_background_color_array_value"),
(init_position, 1),
(position_set_z, 1, 50),
(position_set_x, 1, -25),
(position_set_y, 1, 130),
(cur_tableau_add_mesh, ":script_param_1", 1, 103, 0),
(init_position, 1),
(position_set_z, 1, 100),
(cur_tableau_add_mesh, "mesh_tableau_mesh_heraldic_armor_a", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 200, 0, 100000)
]),
("heraldic_armor_b", 0, "sample_heraldic_armor_b", 512, 512, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(store_sub, ":value", ":script_param_1", "mesh_arms_a01"),
(val_add, ":value", 1),
(troop_get_slot, ":banner_background_color_array_value", "trp_banner_background_color_array", ":value"),
(try_begin),
(eq, ":banner_background_color_array_value", 0),
(assign, ":banner_background_color_array_value", 4285690482),
(try_end),
(cur_tableau_set_background_color, ":banner_background_color_array_value"),
(init_position, 1),
(cur_tableau_add_mesh_with_vertex_color, "mesh_heraldic_armor_bg", 1, 200, 100, ":banner_background_color_array_value"),
(init_position, 1),
(position_set_z, 1, 10),
(position_set_x, 1, -5),
(position_set_y, 1, 130),
(cur_tableau_add_mesh, ":script_param_1", 1, 113, 0),
(init_position, 1),
(position_set_z, 1, 100),
(cur_tableau_add_mesh, "mesh_tableau_mesh_heraldic_armor_b", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 200, 0, 100000)
]),
("heraldic_armor_c", 0, "sample_heraldic_armor_c", 512, 512, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(store_sub, ":value", ":script_param_1", "mesh_arms_a01"),
(val_add, ":value", 1),
(troop_get_slot, ":banner_background_color_array_value", "trp_banner_background_color_array", ":value"),
(try_begin),
(eq, ":banner_background_color_array_value", 0),
(assign, ":banner_background_color_array_value", 4285690482),
(try_end),
(cur_tableau_set_background_color, ":banner_background_color_array_value"),
(init_position, 1),
(cur_tableau_add_mesh_with_vertex_color, "mesh_heraldic_armor_bg", 1, 200, 100, ":banner_background_color_array_value"),
(init_position, 1),
(position_set_z, 1, 10),
(position_set_x, 1, 0),
(position_set_y, 1, 130),
(cur_tableau_add_mesh, ":script_param_1", 1, 115, 0),
(init_position, 1),
(position_set_z, 1, 100),
(cur_tableau_add_mesh, "mesh_tableau_mesh_heraldic_armor_c", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 200, 0, 100000)
]),
("heraldic_armor_d", 0, "sample_heraldic_armor_d", 512, 512, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(store_sub, ":value", ":script_param_1", "mesh_arms_a01"),
(val_add, ":value", 1),
(troop_get_slot, ":banner_background_color_array_value", "trp_banner_background_color_array", ":value"),
(try_begin),
(eq, ":banner_background_color_array_value", 0),
(assign, ":banner_background_color_array_value", 4285690482),
(try_end),
(cur_tableau_set_background_color, ":banner_background_color_array_value"),
(init_position, 1),
(cur_tableau_add_mesh_with_vertex_color, "mesh_heraldic_armor_bg", 1, 200, 100, ":banner_background_color_array_value"),
(init_position, 1),
(position_set_z, 1, 10),
(position_set_x, 1, 0),
(position_set_y, 1, 130),
(cur_tableau_add_mesh, ":script_param_1", 1, 113, 0),
(init_position, 1),
(position_set_z, 1, 100),
(cur_tableau_add_mesh, "mesh_tableau_mesh_heraldic_armor_d", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 200, 0, 100000)
]),
("troop_note_alpha_mask", 0, "mat_troop_portrait_mask", 1024, 1024, 0, 0, 400, 400,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 8947848),
(cur_tableau_set_ambient_light, 10, 11, 15),
(cur_tableau_render_as_alpha_mask),
(call_script, "script_add_troop_to_cur_tableau", ":script_param_1")
]),
("troop_note_color", 0, "mat_troop_portrait_color", 1024, 1024, 0, 0, 400, 400,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 4291214228),
(cur_tableau_set_ambient_light, 10, 11, 15),
(call_script, "script_add_troop_to_cur_tableau", ":script_param_1")
]),
("troop_character_alpha_mask", 0, "mat_troop_portrait_mask", 1024, 1024, 0, 0, 400, 400,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 8947848),
(cur_tableau_set_ambient_light, 10, 11, 15),
(cur_tableau_render_as_alpha_mask),
(call_script, "script_add_troop_to_cur_tableau_for_character", ":script_param_1")
]),
("troop_character_color", 0, "mat_troop_portrait_color", 1024, 1024, 0, 0, 400, 400,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 4292923313),
(cur_tableau_set_ambient_light, 10, 11, 15),
(call_script, "script_add_troop_to_cur_tableau_for_character", ":script_param_1")
]),
("troop_inventory_alpha_mask", 0, "mat_troop_portrait_mask", 1024, 1024, 0, 0, 400, 400,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 8947848),
(cur_tableau_set_ambient_light, 10, 11, 15),
(cur_tableau_render_as_alpha_mask),
(call_script, "script_add_troop_to_cur_tableau_for_inventory", ":script_param_1")
]),
("troop_inventory_color", 0, "mat_troop_portrait_color", 1024, 1024, 0, 0, 400, 400,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 4285159482),
(cur_tableau_set_ambient_light, 10, 11, 15),
(call_script, "script_add_troop_to_cur_tableau_for_inventory", ":script_param_1")
]),
("troop_profile_alpha_mask", 0, "mat_troop_portrait_mask", 1024, 1024, 0, 0, 400, 400,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 8947848),
(cur_tableau_set_ambient_light, 10, 11, 15),
(cur_tableau_render_as_alpha_mask),
(call_script, "script_add_troop_to_cur_tableau_for_profile", ":script_param_1")
]),
("troop_profile_color", 0, "mat_troop_portrait_color", 1024, 1024, 0, 0, 400, 400,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 4294567848),
(cur_tableau_set_ambient_light, 10, 11, 15),
(call_script, "script_add_troop_to_cur_tableau_for_profile", ":script_param_1")
]),
("troop_party_alpha_mask", 0, "mat_troop_portrait_mask", 1024, 1024, 0, 0, 400, 400,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 8947848),
(cur_tableau_set_ambient_light, 10, 11, 15),
(cur_tableau_render_as_alpha_mask),
(call_script, "script_add_troop_to_cur_tableau_for_party", ":script_param_1")
]),
("troop_party_color", 0, "mat_troop_portrait_color", 1024, 1024, 0, 0, 400, 400,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 4290681970),
(cur_tableau_set_ambient_light, 10, 11, 15),
(call_script, "script_add_troop_to_cur_tableau_for_party", ":script_param_1")
]),
("troop_note_mesh", 0, "tableau_with_transparency", 1024, 1024, 0, 0, 350, 350,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 4287137928),
(cur_tableau_set_ambient_light, 10, 11, 15),
(set_fixed_point_multiplier, 100),
(cur_tableau_set_camera_parameters, 0, 40, 40, 0, 100000),
(init_position, 1),
(position_set_z, 1, 100),
(position_set_x, 1, -20),
(position_set_y, 1, -20),
(cur_tableau_add_tableau_mesh, "tableau_troop_note_color", ":script_param_1", 1, 0, 0),
(position_set_z, 1, 200),
(cur_tableau_add_tableau_mesh, "tableau_troop_note_alpha_mask", ":script_param_1", 1, 0, 0),
(position_set_z, 1, 300),
(cur_tableau_add_mesh, "mesh_portrait_blend_out", 1, 0, 0)
]),
("center_note_mesh", 0, "tableau_with_transparency", 1024, 1024, 0, 0, 200, 200,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(cur_tableau_set_background_color, 8947848),
(cur_tableau_set_ambient_light, 10, 11, 15),
(init_position, 8),
(position_set_x, 8, -210),
(position_set_y, 8, 200),
(position_set_z, 8, 300),
(cur_tableau_add_point_light, 8, 550, 500, 450),
(cur_tableau_set_camera_parameters, 1, 10, 10, 10, 10000),
(init_position, 1),
(position_set_z, 1, 0),
(position_set_z, 1, -500),
(init_position, 1),
(position_set_y, 1, -100),
(position_set_x, 1, -100),
(position_set_z, 1, 100),
(position_rotate_z, 1, 200),
(party_get_icon, ":icon_script_param_1", ":script_param_1"),
(try_begin),
(ge, ":icon_script_param_1", 0),
(cur_tableau_add_map_icon, ":icon_script_param_1", 1, 0),
(try_end),
(init_position, 5),
(position_set_x, 5, -90),
(position_set_z, 5, 500),
(position_set_y, 5, 480),
(position_rotate_x, 5, -90),
(position_rotate_z, 5, 180),
(position_rotate_x, 5, -35),
(cur_tableau_set_camera_position, 5)
]),
("faction_note_mesh_for_menu", 0, "pic_arms_swadian", 1024, 512, 0, 0, 450, 225,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 4294967295),
(set_fixed_point_multiplier, 100),
(try_begin),
(is_between, ":script_param_1", "fac_kingdom_1", "fac_kingdoms_end"),
(store_add, ":value", "mesh_pic_arms_swadian", ":script_param_1"),
(val_sub, ":value", "fac_kingdom_1"),
(init_position, 1),
(position_set_y, 1, -5),
(position_set_x, 1, -45),
(cur_tableau_add_mesh, ":value", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 160, 80, 0, 100000),
(try_end)
]),
("faction_note_mesh", 0, "pic_arms_swadian", 1024, 512, 0, 0, 500, 250,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 4294967295),
(set_fixed_point_multiplier, 100),
(try_begin),
(is_between, ":script_param_1", "fac_kingdom_1", "fac_kingdoms_end"),
(store_add, ":value", "mesh_pic_arms_swadian", ":script_param_1"),
(val_sub, ":value", "fac_kingdom_1"),
(init_position, 1),
(position_set_y, 1, -5),
(cur_tableau_add_mesh, ":value", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 100, 50, 0, 100000),
(try_end)
]),
("faction_note_mesh_banner", 0, "tableau_with_transparency", 1024, 1024, 0, 0, 200, 200,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(try_begin),
(faction_get_slot, ":script_param_1_1", ":script_param_1", 1),
(ge, ":script_param_1_1", 0),
(neq, ":script_param_1_1", 1),
(troop_get_slot, ":script_param_1_1_1", ":script_param_1_1", 1),
(store_add, ":value", "spr_banner_k21", 1),
(is_between, ":script_param_1_1_1", "spr_banner_a", ":value"),
(val_sub, ":script_param_1_1_1", "spr_banner_a"),
(store_add, ":value_2", ":script_param_1_1_1", "mesh_banner_a01"),
(init_position, 1),
(position_set_y, 1, 100),
(cur_tableau_add_mesh, ":value_2", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 210, 210, 0, 100000),
(try_end)
]),
("2_factions_mesh", 0, "tableau_with_transparency", 1024, 1024, 0, 0, 200, 200,
[
(store_script_param, ":script_param_1", 1),
(store_mod, ":value", ":script_param_1", 128),
(val_div, ":script_param_1", 128),
(val_add, ":script_param_1", "fac_kingdom_1"),
(val_add, ":value", "fac_kingdom_1"),
(set_fixed_point_multiplier, 100),
(try_begin),
(faction_get_slot, ":script_param_1_1", ":script_param_1", 1),
(ge, ":script_param_1_1", 0),
(neq, ":script_param_1_1", 1),
(troop_get_slot, ":script_param_1_1_1", ":script_param_1_1", 1),
(store_add, ":value_2", "spr_banner_k21", 1),
(is_between, ":script_param_1_1_1", "spr_banner_a", ":value_2"),
(val_sub, ":script_param_1_1_1", "spr_banner_a"),
(store_add, ":value_3", ":script_param_1_1_1", "mesh_banner_a01"),
(init_position, 1),
(position_set_x, 1, -50),
(position_set_y, 1, 100),
(cur_tableau_add_mesh, ":value_3", 1, 0, 0),
(try_end),
(try_begin),
(faction_get_slot, ":script_param_1_1", ":value", 1),
(ge, ":script_param_1_1", 0),
(neq, ":script_param_1_1", 1),
(troop_get_slot, ":script_param_1_1_1", ":script_param_1_1", 1),
(store_add, ":value_2", "spr_banner_k21", 1),
(is_between, ":script_param_1_1_1", "spr_banner_a", ":value_2"),
(val_sub, ":script_param_1_1_1", "spr_banner_a"),
(store_add, ":value_3", ":script_param_1_1_1", "mesh_banner_a01"),
(init_position, 1),
(position_set_x, 1, 50),
(position_set_y, 1, 100),
(cur_tableau_add_mesh, ":value_3", 1, 0, 0),
(try_end),
(cur_tableau_set_camera_parameters, 0, 210, 210, 0, 100000)
]),
("color_picker", 0, "missiles", 32, 32, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(init_position, 1),
(cur_tableau_add_mesh, "mesh_color_picker", 1, 0, 0),
(position_move_z, 1, 1),
(position_move_x, 1, -2),
(position_move_y, 1, -2),
(cur_tableau_add_mesh_with_vertex_color, "mesh_white_plane", 1, 200, 0, ":script_param_1"),
(cur_tableau_set_camera_parameters, 0, 20, 20, 0, 100000)
]),
("custom_banner_square_no_mesh", 0, "missiles", 512, 512, 0, 0, 300, 300,
[]),
("custom_banner_default", 0, "missiles", 512, 256, 0, 0, 0, 0,
[]),
("custom_banner_tall", 0, "missiles", 512, 256, 0, 0, 0, 0,
[]),
("custom_banner_square", 0, "missiles", 256, 256, 0, 0, 0, 0,
[]),
("custom_banner_short", 0, "missiles", 256, 512, 0, 0, 0, 0,
[]),
("background_selection", 0, "missiles", 512, 512, 0, 0, 100, 100,
[]),
("positioning_selection", 0, "missiles", 512, 512, 0, 0, 100, 100,
[]),
("retired_troop_alpha_mask", 0, "mat_troop_portrait_mask", 2048, 2048, 0, 0, 600, 600,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 8947848),
(cur_tableau_set_ambient_light, 10, 11, 15),
(cur_tableau_render_as_alpha_mask),
(call_script, "script_add_troop_to_cur_tableau_for_retirement", ":script_param_1")
]),
("retired_troop_color", 0, "mat_troop_portrait_color", 2048, 2048, 0, 0, 600, 600,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 4293383065),
(cur_tableau_set_ambient_light, 10, 11, 15),
(call_script, "script_add_troop_to_cur_tableau_for_retirement", ":script_param_1")
]),
("retirement_troop", 0, "tableau_with_transparency", 2048, 2048, 0, 0, 600, 600,
[
(store_script_param, ":script_param_1", 1),
(cur_tableau_set_background_color, 4287137928),
(cur_tableau_set_ambient_light, 10, 11, 15),
(set_fixed_point_multiplier, 100),
(cur_tableau_set_camera_parameters, 0, 40, 40, 0, 100000),
(init_position, 1),
(position_set_z, 1, 100),
(position_set_x, 1, -20),
(position_set_y, 1, -20),
(cur_tableau_add_tableau_mesh, "tableau_retired_troop_color", ":script_param_1", 1, 0, 0),
(position_set_z, 1, 200),
(cur_tableau_add_tableau_mesh, "tableau_retired_troop_alpha_mask", ":script_param_1", 1, 0, 0)
]),
("flag_itm", 0, "sample_shield_matte", 512, 512, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(init_position, 1),
(position_set_x, 1, -54),
(position_set_y, 1, 120),
(cur_tableau_add_mesh, ":script_param_1", 1, 116, 0),
(init_position, 1),
(position_set_z, 1, 10),
(cur_tableau_add_mesh, "mesh_tableau_mesh_flag", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 200, 0, 100000)
]),
("fuck_troop", 0, "tableau_with_transparency", 8192, 8192, 0, 0, 600, 600,
[
(cur_tableau_set_background_color, 16777215),
(cur_tableau_set_ambient_light, 10, 11, 15),
(set_fixed_point_multiplier, 100),
(cur_tableau_set_camera_parameters, 0, 40, 40, 0, 100000),
(cur_tableau_set_background_color, 16777215),
(cur_tableau_set_ambient_light, 10, 11, 15),
(call_script, "script_add_troop_to_cur_tableau_for_fuck")
]),
("early_transitional_heraldic", 0, "sample_early_transitional_heraldic_banner", 1024, 1024, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(store_sub, ":value", ":script_param_1", "mesh_arms_a01"),
(val_add, ":value", 1),
(troop_get_slot, ":banner_background_color_array_value", "trp_banner_background_color_array", ":value"),
(try_begin),
(eq, ":banner_background_color_array_value", 0),
(assign, ":banner_background_color_array_value", 4285690482),
(try_end),
(cur_tableau_set_background_color, ":banner_background_color_array_value"),
(init_position, 1),
(cur_tableau_add_mesh_with_vertex_color, "mesh_heraldic_armor_bg", 1, 200, 100, ":banner_background_color_array_value"),
(init_position, 1),
(position_set_x, 1, 17),
(position_set_y, 1, 150),
(cur_tableau_add_mesh, ":script_param_1", 1, 90, 0),
(init_position, 1),
(position_set_z, 1, 30),
(cur_tableau_add_mesh, "mesh_tableau_mesh_early_transitional_heraldic_banner", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 200, 0, 100000)
]),
("samurai_heraldic_flag", 0, "sample_samurai_nobori_heraldic", 1024, 1024, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(store_sub, ":value", ":script_param_1", "mesh_arms_a01"),
(troop_get_slot, ":banner_background_color_array_value", "trp_banner_background_color_array", ":value"),
(cur_tableau_set_background_color, ":banner_background_color_array_value"),
(init_position, 1),
(cur_tableau_add_mesh_with_vertex_color, "mesh_heraldic_armor_bg", 1, 200, 100, ":banner_background_color_array_value"),
(init_position, 1),
(position_set_x, 1, -85),
(position_set_y, 1, -22),
(cur_tableau_add_mesh, ":script_param_1", 1, 43, 0),
(init_position, 1),
(position_set_z, 1, 100),
(cur_tableau_add_mesh, "mesh_tableau_mesh_samurai_heraldic_flag", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 200, 0, 100000)
]),
("banner_spear", 0, "sample_samurai_weapons", 1024, 1024, 0, 0, 0, 0,
[
(store_script_param, ":script_param_1", 1),
(set_fixed_point_multiplier, 100),
(store_sub, ":value", ":script_param_1", "mesh_arms_a01"),
(troop_get_slot, ":banner_background_color_array_value", "trp_banner_background_color_array", ":value"),
(cur_tableau_set_background_color, ":banner_background_color_array_value"),
(init_position, 1),
(cur_tableau_add_mesh_with_vertex_color, "mesh_heraldic_armor_bg", 1, 200, 100, ":banner_background_color_array_value"),
(init_position, 1),
(position_set_x, 1, -33),
(position_set_y, 1, -20),
(cur_tableau_add_mesh, ":script_param_1", 1, 43, 0),
(init_position, 1),
(position_set_z, 1, 100),
(cur_tableau_add_mesh, "mesh_tableau_mesh_banner_spear", 1, 0, 0),
(cur_tableau_set_camera_parameters, 0, 200, 200, 0, 100000)
]),
("blank_stack", 0, "tableau_with_transparency", 8192, 8192, 0, 0, 600, 600,
[
(cur_tableau_set_background_color, 16777215),
(cur_tableau_set_ambient_light, 10, 11, 15),
(set_fixed_point_multiplier, 100),
(cur_tableau_set_camera_parameters, 0, 40, 40, 0, 100000),
(cur_tableau_set_background_color, 16777215),
(cur_tableau_set_ambient_light, 10, 11, 15),
(call_script, "script_add_troop_to_cur_tableau_for_fuck")
]),
] |