the-stack_106_30710
#!/usr/bin/env python3
# ver 0.1 - copy from rdf_itf.py (v0.1) and modify codes on 2/3/2018

import argparse

parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    description='calculation scaling of Ree of single chain')
## args
parser.add_argument('-i', '--input', default='traj.trr', nargs='?',
    help='input trajectory file')
parser.add_argument('-s', '--structure', default='topol.tpr', nargs='?',
    help='.tpr structure file')
parser.add_argument('-select', '--select', nargs='?',
    help='selection of each molecule')
parser.add_argument('-nmol', '--nmol', nargs='?', type=int,
    help='# molecules')
parser.add_argument('-cutoff', '--cutoff', default=0.0, nargs='?', type=float,
    help='cut-off checking distance between atoms in a molecule (d_cutoff < d_neighbor_atoms: stop)')
parser.add_argument('-b', '--begin', default=-1, nargs='?', type=int,
    help='beginning frame (-1: last half trajectory)')
parser.add_argument('-o', '--output', default='pol.ree.scal', nargs='?',
    help='output filename for scaling of Ree file')
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1')

## read args
args = parser.parse_args()
## Check arguments for log
print(" input arguments: {0}".format(args))

## import modules
import sys
sys.path.append('/home/htjung/Utility/python/')
import hjung
from hjung import *
import MDAnalysis as mda
from MDAnalysis.analysis import distances
import numpy as np
from scipy.spatial.distance import pdist

## timer
start_proc, start_prof = hjung.time.init()

## read files
u = mda.Universe(args.structure, args.input)
n_frames = len(u.trajectory)
if args.begin == -1:
    skip_frames = int(n_frames/2)
    print(" skip {} frames".format(skip_frames))
else:
    skip_frames = args.begin
    if args.begin >= n_frames:
        raise ValueError("wrong args.begin because of > n_frames")
n_frames = n_frames - skip_frames
atomtxt = open(args.select).read()
#hjung.polymer.check_traj_connectivity(u,str(atomtxt),args.nmol,args.cutoff,'simple')

## data setting
select_mol = u.select_atoms(str(atomtxt))
if len(select_mol) % args.nmol != 0:
    raise ValueError("wrong # molecules, (args.nmol, select_mol) {} {} ".format(args.nmol, len(select_mol)))
n_deg = int(len(select_mol)/args.nmol)
print("assume all molecules have {} atoms".format(n_deg))
data_ree = np.zeros((args.nmol, n_deg-1))
#data_ree_vec = np.zeros((args.nmol,n_deg,3))

# make a list of the indices which are in the same lag
def list_idx_decr(start, end, init_step):
    #print("in {} {} {}".format(start,end,init_step))
    list_i = []
    while start < end:
        if start < 0:
            print(" something wrong (bug?)!")
            break
        list_i.append(int(start))
        init_step = init_step - 1
        start = start + init_step
    return list_i

# make list set (list of lists of indices) where the elements (in fact, lists of indices) are grouped by lag
# pair_data_size: number of pairs between any two points excluding duplicates,
#   which is the same as the length of the result array of scipy.spatial.distance.pdist
# n_data_points: number of data points used for scipy.spatial.distance.pdist,
#   which is the same as the length of the argument of scipy.spatial.distance.pdist
def main_list_idx(pair_data_size, n_data_points):
    # check validity of arguments
    #print(" main_list_idx:")
    expect_size = int((n_data_points-1)*n_data_points/2)
    if int(pair_data_size) != expect_size:
        raise ValueError(" Your arguments are wrong because {}(input) != {}(expect) based on {} ".format(pair_data_size, expect_size, n_data_points))
    list_set = []
    i_end = pair_data_size
    max_lag = n_data_points
    for i_start in range(n_data_points):
        #print(" lag: {}".format(i_start))
        i_end = i_end - i_start
        if i_end < i_start:
            break
        list_set.append(list_idx_decr(i_start, i_end, max_lag))
    return list_set

## read trajectory
i_frame = 0
imod = hjung.time.process_init()
list_sets = main_list_idx(int((n_deg-1)*n_deg/2), n_deg)
#print(list_sets)
print(len(list_sets))
for ts in u.trajectory[skip_frames:]:
    for i_mol in range(args.nmol):
        pair_dist = pdist(select_mol.positions[i_mol*n_deg:(i_mol+1)*n_deg], metric='euclidean')
        #print(pair_dist[list_sets[len(list_sets)-1]])
        #print(np.mean(pair_dist[list_sets[len(list_sets)-1]]))
        for i_lag in range(len(list_sets)):
            data_ree[i_mol, i_lag] = data_ree[i_mol, i_lag] + np.mean(pair_dist[list_sets[i_lag]])
    i_frame = i_frame + 1
    imod = hjung.time.process_print(i_frame, n_frames, imod)

#print(float(n_frames))
norm_data_ree = data_ree/float(n_frames)
norm_data_ree = np.transpose(norm_data_ree)

# save raw Ree data file
np.savetxt(args.output, norm_data_ree,
    header='time-averaged scaling of Ree of {} single chains'.format(args.nmol),
    fmt='%e', comments='# ')
np.save(args.output, norm_data_ree)

# save avg file
if args.nmol > 1:
    data_ree_avg = np.column_stack((np.mean(norm_data_ree, axis=1), np.std(norm_data_ree, axis=1)))
    np.savetxt(args.output+'.avg', data_ree_avg,
        header='molecule-averaged scaling of Ree of {} chains'.format(args.nmol),
        fmt='%e', comments='# ')
    print(" saved average Ree files")

## timer
hjung.time.end_print(start_proc, start_prof)
the-stack_106_30713
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class SuiteSparse(Package):
    """
    SuiteSparse is a suite of sparse matrix algorithms
    """
    homepage = 'http://faculty.cse.tamu.edu/davis/suitesparse.html'
    url = 'https://github.com/DrTimothyAldenDavis/SuiteSparse/archive/v4.5.3.tar.gz'
    git = 'https://github.com/DrTimothyAldenDavis/SuiteSparse.git'

    version('5.7.2', sha256='fe3bc7c3bd1efdfa5cffffb5cebf021ff024c83b5daf0ab445429d3d741bd3ad')
    version('5.7.1', sha256='5ba5add1663d51a1b6fb128b50fe869b497f3096765ff7f8212f0ede044b9557')
    version('5.6.0', sha256='76d34d9f6dafc592b69af14f58c1dc59e24853dcd7c2e8f4c98ffa223f6a1adb')
    version('5.5.0', sha256='63c73451734e2bab19d1915796c6776565ea6aea5da4063a9797ecec60da2e3d')
    version('5.4.0', sha256='d9d62d539410d66550d0b795503a556830831f50087723cb191a030525eda770')
    version('5.3.0', sha256='d8ef4bee4394d2f07299d4688b83bbd98e9d3a2ebbe1c1632144b6f7095ce165')
    version('5.2.0', sha256='68c431aef3d9a0b02e97803eb61671c5ecb9d36fd292a807db87067dadb36e53')
    version('5.1.2', sha256='97dc5fdc7f78ff5018e6a1fcc841e17a9af4e5a35cebd62df6922349bf12959e')
    version('5.1.0', sha256='0b0e03c63e67b04529bb6248808d2a8c82259d40b30fc5a7599f4b6f7bdd4dc6')
    version('5.0.0', sha256='2f8694d9978033659f10ceb8bdb19147d3c519a0251b8de84be6ba8824d30517')
    version('4.5.6', sha256='1c7b7a265a1d6c606095eb8aa3cb8e27821f1b7f5bc04f28df6d62906e02f4e4')
    version('4.5.5', sha256='80d1d9960a6ec70031fecfe9adfe5b1ccd8001a7420efb50d6fa7326ef14af91')
    version('4.5.3', sha256='b6965f9198446a502cde48fb0e02236e75fa5700b94c7306fc36599d57b563f4')

    variant('tbb', default=False, description='Build with Intel TBB')
    variant('pic', default=True, description='Build position independent code (required to link with shared libraries)')
    variant('cuda', default=False, description='Build with CUDA')
    variant('openmp', default=False, description='Build with OpenMP')

    depends_on('blas')
    depends_on('lapack')
    depends_on('m4', type='build', when='@5.0.0:')
    depends_on('cmake', when='@5.2.0:', type='build')
    depends_on('metis@5.1.0', when='@4.5.1:')

    # In @4.5.1 TBB support in SPQR seems to be broken as TBB-related linking
    # flags do not seem to be used, which leads to linking errors on Linux.
    depends_on('tbb', when='@4.5.3:+tbb')

    depends_on('cuda', when='+cuda')

    patch('tbb_453.patch', when='@4.5.3:4.5.5+tbb')

    # This patch removes unsupported flags for pgi compiler
    patch('pgi.patch', when='%pgi')
    patch('pgi.patch', when='%nvhpc')

    # This patch adds '-lm' when linking libgraphblas and when using clang.
    # Fixes 'libgraphblas.so.2.0.1: undefined reference to `__fpclassify''
    patch('graphblas_libm_dep.patch', when='@5.2.0:5.2.99%clang')

    conflicts('%gcc@:4.8', when='@5.2.0:',
              msg='gcc version must be at least 4.9 for suite-sparse@5.2.0:')

    def install(self, spec, prefix):
        # The build system of SuiteSparse is quite old-fashioned.
        # It's basically a plain Makefile which includes a header
        # (SuiteSparse_config/SuiteSparse_config.mk) with a lot of convoluted
        # logic in it. Any kind of customization will need to go through
        # filtering of that file

        cc_pic_flag = self.compiler.cc_pic_flag if '+pic' in spec else ''
        f77_pic_flag = self.compiler.f77_pic_flag if '+pic' in spec else ''

        make_args = [
            # By default, the Makefile uses the Intel compilers if
            # they are found. The AUTOCC flag disables this behavior,
            # forcing it to use Spack's compiler wrappers.
            'AUTOCC=no',
            # CUDA=no does NOT disable cuda, it only disables internal search
            # for CUDA_PATH. If in addition the latter is empty, then CUDA is
            # completely disabled. See
            # [SuiteSparse/SuiteSparse_config/SuiteSparse_config.mk] for more.
            'CUDA=no',
            'CUDA_PATH=%s' % (spec['cuda'].prefix if '+cuda' in spec else ''),
            'CFOPENMP=%s' % (self.compiler.openmp_flag if '+openmp' in spec else ''),
            'CFLAGS=-O3 %s' % cc_pic_flag,
            # Both FFLAGS and F77FLAGS are used in SuiteSparse makefiles;
            # FFLAGS is used in CHOLMOD, F77FLAGS is used in AMD and UMFPACK.
            'FFLAGS=%s' % f77_pic_flag,
            'F77FLAGS=%s' % f77_pic_flag,
            # use Spack's metis in CHOLMOD/Partition module,
            # otherwise internal Metis will be compiled
            'MY_METIS_LIB=%s' % spec['metis'].libs.ld_flags,
            'MY_METIS_INC=%s' % spec['metis'].prefix.include,
            # Make sure Spack's Blas/Lapack is used. Otherwise System's
            # Blas/Lapack might be picked up. Need to add -lstdc++, following
            # with the TCOV path of SparseSuite 4.5.1's Suitesparse_config.mk,
            # even though this fix is ugly
            'BLAS=%s' % (spec['blas'].libs.ld_flags + (
                ' -lstdc++' if '@4.5.1' in spec else '')),
            'LAPACK=%s' % spec['lapack'].libs.ld_flags,
        ]

        # Recent versions require c11 but some demos do not get the c11 from
        # GraphBLAS/CMakeLists.txt, for example the file
        # GraphBLAS/Demo/Program/wildtype_demo.c. For many compilers this is
        # not an issue because c11 or newer is their default. However, for some
        # compilers (e.g. xlc) the c11 flag is necessary.
        if spec.satisfies('@5.4:5.7.1') and ('%xl' in spec or '%xl_r' in spec):
            make_args += ['CFLAGS+=%s' % self.compiler.c11_flag]

        # 64bit blas in UMFPACK:
        if (spec.satisfies('^openblas+ilp64') or
                spec.satisfies('^intel-mkl+ilp64') or
                spec.satisfies('^intel-parallel-studio+mkl+ilp64')):
            make_args.append('UMFPACK_CONFIG=-DLONGBLAS="long long"')

        # SuiteSparse defaults to using '-fno-common -fexceptions' in
        # CFLAGS, but not all compilers use the same flags for these
        # optimizations
        if any([x in spec for x in ('%apple-clang', '%clang', '%gcc', '%intel')]):
            make_args += ['CFLAGS+=-fno-common -fexceptions']
        elif '%pgi' in spec:
            make_args += ['CFLAGS+=--exceptions']

        if spack_f77.endswith('xlf') or spack_f77.endswith('xlf_r'):
            make_args += ['CFLAGS+=-DBLAS_NO_UNDERSCORE']

        # Intel TBB in SuiteSparseQR
        if 'tbb' in spec:
            make_args += [
                'SPQR_CONFIG=-DHAVE_TBB',
                'TBB=%s' % spec['tbb'].libs.ld_flags,
            ]

        if '@5.3:' in spec:
            # Without CMAKE_LIBRARY_PATH defined, the CMake file in the
            # Mongoose directory finds libsuitesparseconfig.so in system
            # directories like /usr/lib.
            make_args += [
                'CMAKE_OPTIONS=-DCMAKE_INSTALL_PREFIX=%s' % prefix +
                ' -DCMAKE_LIBRARY_PATH=%s' % prefix.lib]

        # In those SuiteSparse versions calling "make install" in one go is
        # not possible, mainly because of GraphBLAS. Thus compile first and
        # install in a second run.
        if '@5.4.0:' in self.spec:
            make('library', *make_args)

        make_args.append('INSTALL=%s' % prefix)
        make('install', *make_args)

    @property
    def libs(self):
        """Export the libraries of SuiteSparse.
        Sample usage: spec['suite-sparse'].libs.ld_flags
                      spec['suite-sparse:klu,btf'].libs.ld_flags
        """
        # Component libraries, ordered by dependency. Any missing components?
        all_comps = ['klu', 'btf', 'umfpack', 'cholmod', 'colamd', 'amd',
                     'camd', 'ccolamd', 'cxsparse', 'ldl', 'rbio', 'spqr',
                     'suitesparseconfig']
        query_parameters = self.spec.last_query.extra_parameters
        comps = all_comps if not query_parameters else query_parameters
        libs = find_libraries(['lib' + c for c in comps], root=self.prefix.lib,
                              shared=True, recursive=False)
        if not libs:
            return None
        libs += find_system_libraries('librt')
        return libs
the-stack_106_30715
""" Author of this code work, Tsubasa Kuwabara. c FFRI Security, Inc. 2020 """ import subprocess import shutil import os import json from util import extract_file_recursive CWD_DIR = os.getcwd() def is_die_packingdata_detectable(path, result): label = os.path.basename(os.path.dirname(path)) label = label.replace("WinUpack", "(Win)Upack") label = label.replace("BeRoEXEPacker", "BeRo") label = label.replace("Yoda`s Crpyter", "Yoda's Crypter") if "detects" not in result: return False, [label] detects = result["detects"] protector_list = [] for i in detects: if ( "type" not in i or "string" not in i or ( i["type"] != "protector" and i["type"] != "packer" and i["type"] != "installer" ) ): continue if "Nullsoft Scriptable Install System" in i["string"]: i["string"] += "NSIS" protector_list.append(i["string"]) if len(protector_list) <= 0: return False, [label] detectable_bool = False for protector in protector_list: if label.lower() in protector.lower(): detectable_bool = True break return detectable_bool, [label] def is_die_rcelab_detectable(path, result): label = os.path.basename(os.path.dirname(path)) if "ZProtect 1.4.4.0/UnPackMe2" in path or "ZProtect 1.4.4.0/UnPackMe1" in path: label = "ZProtect" label = label.replace("dot", ".") with open(os.path.join(CWD_DIR, "rce_label_convert.json"), "r") as f: json_data = json.load(f) replace_bool = False for i in json_data: if i in label and "die" in json_data[i]: label = json_data[i]["die"] replace_bool = True break if not replace_bool: new_label = "" for i in range(len(label.split(" ")) - 1): new_label += label.split(" ")[i] + " " if len(label.split(" ")) <= 1: new_label = label + " " label = new_label[:-1] detects = result["detects"] protector_list = [] for i in detects: if ( "type" not in i or "string" not in i or (i["type"] != "protector" and i["type"] != "packer") ): continue protector_list.append(i["string"]) if len(protector_list) <= 0: return False, [label] detectable_bool = False for protector in protector_list: if label.lower() in protector.lower(): detectable_bool = True break return detectable_bool, [label] def is_detectable(path, dataset_name, result): if dataset_name == "PackingData": return is_die_packingdata_detectable(path, result) elif dataset_name == "RCE_Lab": return is_die_rcelab_detectable(path, result) else: return False, [] def scan_file_recursive(path, dataset_name, json_result): for name in os.listdir(path): new_path = os.path.join(path, name) if os.path.isdir(new_path): scan_file_recursive(new_path, dataset_name, json_result) else: if ".exe" in new_path.lower() or ".dll" in new_path.lower(): tmp_path = os.path.join(CWD_DIR, "test.exe") shutil.copy(new_path, tmp_path) result = subprocess.check_output(["./diec.sh", "-j", tmp_path]) os.remove(tmp_path) result = json.loads(result) detectable_bool, label_list = is_detectable( new_path, dataset_name, result ) json_result.append( { "path": os.path.dirname(new_path), "name": os.path.basename(new_path), "scan": result, "detectable": detectable_bool, "labels": label_list, } ) def scan(path, dataset_name): for name in os.listdir(path): new_path = os.path.join(path, name) if not os.path.isdir(new_path): continue if ".git" in name: continue print(new_path) json_result = [] scan_file_recursive(new_path, dataset_name, json_result) with open( os.path.join(CWD_DIR, "result/die/", dataset_name, name + ".json"), "w" ) as f: json.dump(json_result, f, indent=4) print("create json: " + new_path + ".json") def main(): os.chdir("die_lin64_portable_3.00/die_lin64_portable/") path = 
os.path.join(CWD_DIR, "dataset/PackingData/") scan(path, "PackingData") path = os.path.join(CWD_DIR, "dataset/UnpackMe/") extract_file_recursive(path) scan(path, "RCE_Lab") if __name__ == "__main__": main()
the-stack_106_30717
#
# squeeze paddle model generator
#
import numpy as np
from save_model import saveModel
import paddle as pdpd
import sys

data_type = 'float32'


def squeeze(name: str, x, axes: list):
    pdpd.enable_static()

    with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
        node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type)
        out = pdpd.fluid.layers.squeeze(node_x, axes=axes, name='squeeze')

        cpu = pdpd.static.cpu_places(1)
        exe = pdpd.static.Executor(cpu[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(pdpd.static.default_startup_program())

        outs = exe.run(
            feed={'x': x},
            fetch_list=[out])

        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x],
                  outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]


def main():
    data = np.random.rand(1, 3, 1, 4).astype(data_type)
    squeeze("squeeze", data, [0, -2])
    squeeze("squeeze_null_axes", data, [])


if __name__ == "__main__":
    main()
the-stack_106_30718
import pytest

from tests.support.inline import inline


# 15.1.3 "Let source be the result returned from the outerHTML IDL attribute
# of the document element"
def test_source_matches_outer_html(session):
    session.url = inline("<html><head><title>Cheese</title><body>Peas")

    expected_source = session.execute_script(
        "return document.documentElement.outerHTML")

    assert session.source == expected_source
the-stack_106_30719
import opensim
import math
import numpy as np
import os
from .utils.mygym import convert_to_gym
import gym


class Osim(object):
    # Initialize simulation
    model = None
    state = None
    state0 = None
    joints = []
    bodies = []
    brain = None
    maxforces = []
    curforces = []

    def __init__(self, model_path, visualize):
        self.model = opensim.Model(model_path)
        self.model.initSystem()
        self.brain = opensim.PrescribedController()

        # Enable the visualizer
        self.model.setUseVisualizer(visualize)

        self.muscleSet = self.model.getMuscles()
        self.forceSet = self.model.getForceSet()
        self.bodySet = self.model.getBodySet()
        self.jointSet = self.model.getJointSet()
        self.contactGeometrySet = self.model.getContactGeometrySet()

        for j in range(self.muscleSet.getSize()):
            func = opensim.Constant(1.0)
            self.brain.addActuator(self.muscleSet.get(j))
            self.brain.prescribeControlForActuator(j, func)

            self.maxforces.append(self.muscleSet.get(j).getMaxIsometricForce())
            self.curforces.append(1.0)

        self.model.addController(self.brain)

    def set_strength(self, strength):
        self.curforces = strength
        for i in range(len(self.curforces)):
            self.muscleSet.get(i).setMaxIsometricForce(self.curforces[i] * self.maxforces[i])

    def get_body(self, name):
        return self.bodySet.get(name)

    def get_joint(self, name):
        return self.jointSet.get(name)

    def get_muscle(self, name):
        return self.muscleSet.get(name)

    def get_contact_geometry(self, name):
        return self.contactGeometrySet.get(name)

    def get_force(self, name):
        return self.forceSet.get(name)

    def initializeState(self):
        self.state = self.model.initializeState()

    def revert(self, state):
        self.state = state


class Spec(object):
    def __init__(self, *args, **kwargs):
        self.id = 0
        self.timestep_limit = 1000


class OsimEnv(gym.Env):
    stepsize = 0.01
    integration_accuracy = 1e-3
    timestep_limit = 1000
    test = False

    action_space = None
    observation_space = None
    osim_model = None
    istep = 0

    model_path = ""
    visualize = False
    ninput = 0
    noutput = 0
    last_action = None

    spec = None

    metadata = {
        'render.modes': ['human'],
        'video.frames_per_second': 50
    }

    def __getstate__(self):
        state = self.__dict__.copy()
        del state['osim_model']
        print("HERE1")
        return state

    def __setstate__(self, newstate):
        self.__dict__.update(newstate)
        self.osim_model = Osim(self.model_path, True)
        self.configure()

    def angular_dist(self, t, s):
        x = (t - s) % (2 * math.pi)
        return min(x, 2 * math.pi - x)

    def compute_reward(self):
        return 0.0

    def is_done(self):
        return False

    def terminate(self):
        pass

    def __init__(self, visualize=True, noutput=None):
        self.visualize = visualize
        self.osim_model = Osim(self.model_path, self.visualize)

        self.noutput = noutput
        if not noutput:
            self.noutput = self.osim_model.muscleSet.getSize()
        if not self.action_space:
            self.action_space = ([0.0] * self.noutput, [1.0] * self.noutput)
        if not self.observation_space:
            self.observation_space = ([-math.pi] * self.ninput, [math.pi] * self.ninput)
        self.action_space = convert_to_gym(self.action_space)
        self.observation_space = convert_to_gym(self.observation_space)
        self.spec = Spec()
        self.horizon = self.spec.timestep_limit

        self.configure()
        # self.reset()

    def configure(self):
        pass

    def reset(self):
        super(OsimEnv, self).reset()
        self.istep = 0
        self.osim_model.initializeState()
        return self.get_observation()

    def sanitify(self, x):
        if math.isnan(x):
            return 0.0
        BOUND = 1000.0
        if x > BOUND:
            x = BOUND
        if x < -BOUND:
            x = -BOUND
        return x

    def activate_muscles(self, action):
        if np.any(np.isnan(action)):
            raise ValueError("NaN passed in the activation vector. Values in [0,1] interval are required.")
        action = np.clip(action, 0.0, 1.0)
        self.last_action = action

        brain = opensim.PrescribedController.safeDownCast(self.osim_model.model.getControllerSet().get(0))
        functionSet = brain.get_ControlFunctions()

        for j in range(functionSet.getSize()):
            func = opensim.Constant.safeDownCast(functionSet.get(j))
            func.setValue(float(action[j]))

    def step(self, action):
        self.activate_muscles(action)

        # Integrate one step
        if self.istep == 0:
            print("Initializing the model!")
            self.manager = opensim.Manager(self.osim_model.model)
            self.osim_model.state.setTime(self.stepsize * self.istep)
            self.manager.initialize(self.osim_model.state)

        try:
            self.osim_model.state = self.manager.integrate(self.stepsize * (self.istep + 1))
        except Exception as e:
            print(e)
            return self.get_observation(), -500, True, {}

        self.istep = self.istep + 1

        res = [self.get_observation(), self.compute_reward(), self.is_done(), {}]
        return res

    def render(self, mode='human', close=False):
        pass
the-stack_106_30720
import torch
from torch import nn


class FeedForward(nn.Module):
    def __init__(self, dim, hidden_dim, dropout=0.):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, dim),
            nn.Dropout(dropout)
        )

    def forward(self, x):
        return self.net(x)


class PreNorm(nn.Module):
    def __init__(self, dim, fn):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn

    def forward(self, x, **kwargs):
        return self.fn(self.norm(x), **kwargs)


class FNetBlock(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x):
        x = torch.fft.fft(torch.fft.fft(x, dim=-1), dim=-2).real
        return x


class FNet(nn.Module):
    def __init__(self, dim, depth, mlp_dim, dropout=0.):
        super().__init__()
        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(nn.ModuleList([
                PreNorm(dim, FNetBlock()),
                PreNorm(dim, FeedForward(dim, mlp_dim, dropout=dropout))
            ]))

    def forward(self, x):
        for attn, ff in self.layers:
            x = attn(x) + x
            x = ff(x) + x
        return x
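
# --- Hypothetical usage sketch added by the editor; not part of the original file. ---
# It only exercises the classes defined above: an FNet mixer applied to a batch of
# token embeddings of shape (batch, seq_len, dim). All sizes here are illustrative.
if __name__ == "__main__":
    model = FNet(dim=64, depth=2, mlp_dim=128, dropout=0.1)
    tokens = torch.randn(8, 16, 64)   # (batch, seq_len, dim)
    out = model(tokens)               # Fourier token mixing + feed-forward, with residuals
    print(out.shape)                  # torch.Size([8, 16, 64]); shape is preserved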
the-stack_106_30721
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.autograd import Variable
import numpy as np


def variable_hook(grad):
    return grad


def one_hot(y, depth, cuda=True):
    if not cuda:
        y_onehot = torch.FloatTensor(y.size(0), depth)
    else:
        y_onehot = torch.cuda.FloatTensor(y.size(0), depth)
    y_onehot.zero_()
    y_onehot.scatter_(1, y.data.unsqueeze(1), 1)
    return Variable(y_onehot)


def sender_action(sender, images_vectors, opt):
    sender_probs, s_emb = sender(images_vectors)
    sender_probs = sender_probs + sender.eps
    sample = torch.multinomial(sender_probs, 1)
    sample = sample.squeeze(-1)
    if not opt.multi_layer:
        one_hot_signal = one_hot(sample, sender.vocab_size, cuda=opt.cuda)
    else:
        one_hot_signal = one_hot(sample, sender.given_vocab_size, cuda=opt.cuda)
    one_hot_signal = Variable(one_hot_signal.data, requires_grad=True)
    return one_hot_signal, sender_probs, s_emb


def receiver_action(receiver, images_vectors, one_hot_signal, opt):
    receiver_probs, r_emb = receiver(images_vectors, one_hot_signal)
    receiver_probs = receiver_probs + receiver.eps
    sample = torch.multinomial(receiver_probs, 1)
    sample = sample.squeeze(-1)
    one_hot_output = one_hot(sample, receiver.game_size, cuda=opt.cuda)
    one_hot_output = Variable(one_hot_output.data, requires_grad=True)
    return one_hot_output, receiver_probs, r_emb


class Communication(torch.nn.Module):
    def __init__(self):
        super(Communication, self).__init__()

    def forward(self, y, predictions):
        _, amax = predictions.max(dim=1)
        _, amax_gt = y.max(dim=1)
        rewards = (amax == amax_gt).float()
        return rewards
the-stack_106_30728
import os
import tempfile

import pytest

from sqli.vuln_app import create_app


@pytest.fixture
def vulnerable_app():
    db_fd, db_path = tempfile.mkstemp()
    app = create_app({"VULNERABLE": True, "DATABASE": db_path})

    yield app

    os.close(db_fd)
    os.unlink(db_path)


@pytest.fixture()
def patched_app():
    db_fd, db_path = tempfile.mkstemp()
    app = create_app({"VULNERABLE": False, "DATABASE": db_path})

    yield app

    os.close(db_fd)
    os.unlink(db_path)


@pytest.fixture
def vulnerable_client(vulnerable_app):
    return vulnerable_app.test_client()


@pytest.fixture
def patched_client(patched_app):
    return patched_app.test_client()
the-stack_106_30735
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Snphylo(Package):
    """A pipeline to generate a phylogenetic tree from huge SNP data"""

    homepage = "http://chibba.pgml.uga.edu/snphylo/"
    url = "http://chibba.pgml.uga.edu/snphylo/snphylo.tar.gz"

    version('2016-02-04', sha256='d9e144021c83dbef97bebf743b92109ad0afcfe70f37c244059b43f11b8a50da')

    depends_on('python', type=('build', 'run'))
    depends_on('r', type=('build', 'run'))
    depends_on('r-phangorn', type=('build', 'run'))
    depends_on('r-gdsfmt', type=('build', 'run'))
    depends_on('r-snprelate', type=('build', 'run'))
    depends_on('r-getopt', type=('build', 'run'))
    depends_on('muscle')
    depends_on('phylip')

    def install(self, spec, prefix):
        install_answer = ['y', 'y', 'y', 'y']
        install_answer_input = 'spack-config.in'
        with open(install_answer_input, 'w') as f:
            f.writelines(install_answer)
        with open(install_answer_input, 'r') as f:
            bash = which('bash')
            bash('./setup.sh', input=f)
        install_tree('.', prefix)

    def setup_run_environment(self, env):
        env.prepend_path('PATH', self.spec.prefix)
the-stack_106_30737
import sys, argparse, logging

import pandas as pd
import numpy as np
import matplotlib.colors
import matplotlib.pyplot as plt
from adjustText import adjust_text

color_map_green_yellow_red = matplotlib.colors.LinearSegmentedColormap.from_list("", ["green", "yellow", "red"])


def gradient_image(ax, extent, direction=0.3, cmap=color_map_green_yellow_red,
                   cmap_range=(0, 1), interpolation='bicubic', **kwargs):
    """
    Draw a gradient image based on a colormap.

    Parameters
    ----------
    ax : Axes
        The axes to draw on.
    extent
        The extent of the image as (xmin, xmax, ymin, ymax).
        By default, this is in Axes coordinates but may be
        changed using the *transform* kwarg.
    direction : float
        The direction of the gradient. This is a number in
        range 0 (=vertical) to 1 (=horizontal).
    cmap_range : float, float
        The fraction (cmin, cmax) of the colormap that should be
        used for the gradient, where the complete colormap is (0, 1).
    **kwargs
        Other parameters are passed on to `.Axes.imshow()`.
        In particular useful is *cmap*.
    """
    phi = direction * np.pi / 2
    v = np.array([np.cos(phi), np.sin(phi)])
    X = np.array([[v @ [1, 0], v @ [1, 1]],
                  [v @ [0, 0], v @ [0, 1]]])
    a, b = cmap_range
    X = a + (b - a) / X.max() * X
    im = ax.imshow(X, extent=extent, interpolation=interpolation,
                   vmin=0, vmax=1.0, cmap=cmap, **kwargs)
    return im


def scatter_and_annotate_risk_data_points():
    # https://stackoverflow.com/questions/14432557/matplotlib-scatter-plot-with-different-text-at-each-data-point
    # https://stackoverflow.com/questions/19073683/matplotlib-overlapping-annotations-text
    return im


def decorate_axes(ax, graph_font):
    ax.set_xlabel('Impact', **graph_font)
    im = ax.set_ylabel('Likelihood', **graph_font)
    ax.tick_params(axis='both', labelsize=8)
    return im


def decorate_figure(ax, graph_font):
    im = ax.set_title('Risk Matrix Graph', **graph_font)
    return im


def calculate_simple_risk(df):
    # the risk is impact * likelihood^T
    return df['impact'] * df['likelihood']


def main(csv_filename, graph_filename):
    # load the data files
    logging.debug("Parsing CSV file: {}".format(csv_filename))
    df = pd.read_csv(csv_filename)
    print(df.to_string())

    # calculate risk
    r = calculate_simple_risk(df)

    # generate the graph
    xmin, xmax = xlim = -1, 6
    ymin, ymax = ylim = -1, 6
    min_impact, max_impact = impact_lim = 0, 5
    min_likelihood, max_likelihood = likelihood_lim = 0, 5

    fig, ax = plt.subplots()

    im = gradient_image(ax, direction=0.5,
                        extent=(min_impact, xmax, min_likelihood, ymax),
                        cmap_range=(0.00, 1.16), interpolation='bicubic')

    ax.set(xlim=(min_impact, xmax), ylim=(min_likelihood, ymax), autoscale_on=False)
    ax.set_aspect('equal')

    ax.scatter(x=df['impact'], y=df['likelihood'], s=r*2, c='black', alpha=0.2)

    graph_font = {'fontname': 'Helvetica', 'fontsize': 8}

    # annotate
    texts = []
    for index, id in enumerate(df['id']):
        texts.append(
            ax.annotate(str(df['id'][index]),
                        (df['impact'][index], df['likelihood'][index]),
                        bbox=dict(boxstyle="round,pad=0.1", fc='w', alpha=0.4, ec="black", lw=0.5),
                        **graph_font
                        )
        )

    # adjust text
    adjust_text(texts, ax=ax,
                expand_text=(1.5, 1.3),
                expand_points=(1.3, 1.3),
                expand_objects=(1.3, 1.3),
                force_text=(1.55, 1.35),
                force_points=(1.15, 1.3),
                force_objects=(1.3, 1.3),
                only_move=dict(points='xy', text='xy', object='xy'),
                va='center', ha='left',
                precision=0.1,
                arrowprops=dict(arrowstyle="-", color='black', lw=0.5),
                lim=2000)

    decorate_axes(ax, graph_font)
    decorate_figure(ax, graph_font)

    plt.savefig(graph_filename)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Takes a csv risk matrix with a header and generates a risk matrix graph",
        epilog="",
        fromfile_prefix_chars='@')
    parser.add_argument(
        "csv_filename",
        help="Path to the .csv file containing the risk matrix",
        metavar="CSV_FILENAME")
    parser.add_argument(
        "graph_filename",
        help="Path to the .png output file",
        metavar="PNG_FILENAME")
    parser.add_argument(
        "-v", "--verbose",
        help="increase output verbosity",
        action="store_true")
    args = parser.parse_args()

    # Setup logging
    loglevel = logging.INFO
    if args.verbose:
        loglevel = logging.DEBUG
    logging.basicConfig(format="%(levelname)s: %(message)s", level=loglevel)

    main(args.csv_filename, args.graph_filename)
the-stack_106_30738
import re


def input_error(func):
    def inner(adress_book, com):
        my_error_1 = "Missing name in database!"
        my_error_2 = "Wrong phone-number (must be in format XXX-XXX-XX-XX)!"
        my_error_3 = "You made a mistake while entering the command!"
        my_error_4 = "You made a mistake in the number of arguments!"
        my_error_5 = "You wrote the compound command 'good bye' incorrectly!"
        my_error_6 = "You wrote the compound command 'show all' incorrectly!"
        try:
            res = func(adress_book, com)
        except KeyError:
            return my_error_1
        except ValueError:
            return my_error_2
        except UnboundLocalError:
            return my_error_3
        except IndexError:
            return my_error_4
        except TypeError:
            return my_error_5
        except NameError:
            return my_error_6
        else:
            return res
    return inner


def hello():
    return "How can I help you?"


def add(adress_book, name, phone):
    if re.match(r"\d{3}-\d{3}-\d{2}-\d{2}", phone):
        adress_book[name] = phone
        return "added " + name + " " + phone
    else:
        raise ValueError


def change(adress_book, name, phone):
    if re.match(r"\d{3}-\d{3}-\d{2}-\d{2}", phone):
        adress_book.pop(name)
        adress_book[name] = phone
        return "changed " + name + " " + phone
    else:
        raise ValueError


def phone(adress_book, name):
    phone = adress_book.pop(name)
    adress_book[name] = phone
    return phone


def showall(adress_book):
    return str(adress_book)


def ausgang():
    return "Good bye!"


@input_error
def parser(adress_book, com):
    commands = {"hello": hello,
                "add": add,
                "change": change,
                "phone": phone,
                "show all": showall,
                "exit": ausgang,
                "close": ausgang,
                "good bye": ausgang}
    if com[0] == "exit" or com[0] == "close":
        result = handler(commands[com[0]])
    elif com[0] == "good" and len(com) > 1:
        if com[1] == "bye":
            result = handler(commands[com[0] + " " + com[1]])
        else:
            raise TypeError
    elif com[0] == "hello":
        result = handler(commands[com[0]])
    elif com[0] == "add":
        result = handler(commands[com[0]], adress_book, com[1], com[2])
    elif com[0] == "change":
        result = handler(commands[com[0]], adress_book, com[1], com[2])
    elif com[0] == "phone":
        result = handler(commands[com[0]], adress_book, com[1])
    elif com[0] == "show" and len(com) > 1:
        if com[1] == "all":
            result = handler(commands[com[0] + " " + com[1]], adress_book)
        else:
            raise NameError
    return result


def handler(func, *arg):
    return func(*arg)


def main():
    adress_book = dict()
    result = ""
    print("{:>20}{:<300}".format("Your assistant: ", "Hello"))
    while result != "Good bye!":
        command = input("{:>20}".format("User: "))
        com = command.lower().split(" ")
        result = parser(adress_book, com)
        print("{:>20}{:<300}".format("Your assistant: ", result))


if __name__ == "__main__":
    main()
the-stack_106_30740
""" MIT License Copyright (c) 2019 Soham Pal, Yash Gupta, Aditya Shukla, Aditya Kanade, Shirish Shevade, Vinod Ganapathy. Indian Institute of Science. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import tensorflow as tf import numpy as np def pairwise_distances(A, B): na = tf.reduce_sum(tf.square(A), 1) nb = tf.reduce_sum(tf.square(B), 1) na = tf.reshape(na, [-1, 1]) nb = tf.reshape(nb, [1, -1]) D = tf.sqrt(tf.maximum(na - 2*tf.matmul(A, B, False, True) + nb, 0.0)) return D class KCenter(object): def __init__(self): self.A = tf.placeholder(tf.float32, shape=[None,None]) self.B = tf.placeholder(tf.float32, shape=[None, None]) D = pairwise_distances(self.A, self.B) D_min = tf.reduce_min(D, axis=1) self.D_min_max = tf.reduce_max(D_min) self.D_min_argmax = tf.argmax(D_min)
the-stack_106_30742
import pytest  # noqa: F401
import pyomo.core as po

from calliope.test.common.util import build_test_model as build_model
from calliope.backend.pyomo.util import get_domain, get_param, invalid


@pytest.fixture(scope="class")
def model():
    return build_model({}, "simple_supply,two_hours,investment_costs")


class TestGetParam:
    def test_get_param_with_timestep_existing(self):
        """ """
        m = build_model({}, "simple_supply,two_hours,investment_costs")
        m.run()
        param = get_param(
            m._backend_model,
            "resource",
            ("b", "test_demand_elec", m._backend_model.timesteps[1]),
        )
        assert po.value(param) == -5  # see demand_elec.csv

    def test_get_param_no_timestep_existing(self):
        """ """
        m = build_model({}, "simple_supply,two_hours,investment_costs")
        m.run()
        param = get_param(
            m._backend_model,
            "energy_eff",
            ("b", "test_supply_elec", m._backend_model.timesteps[1]),
        )
        assert po.value(param) == 0.9  # see test model.yaml

    def test_get_param_no_timestep_possible(self):
        """ """
        m = build_model({}, "simple_supply,two_hours,investment_costs")
        m.run()
        param = get_param(m._backend_model, "energy_cap_max", ("b", "test_supply_elec"))
        assert po.value(param) == 10  # see test model.yaml

        param = get_param(
            m._backend_model, "cost_energy_cap", ("monetary", "a", "test_supply_elec")
        )
        assert po.value(param) == 10

    def test_get_param_from_default(self):
        """ """
        m = build_model({}, "simple_supply_and_supply_plus,two_hours,investment_costs")
        m.run()

        param = get_param(
            m._backend_model,
            "parasitic_eff",
            ("b", "test_supply_plus", m._backend_model.timesteps[1]),
        )
        assert po.value(param) == 1  # see defaults.yaml

        param = get_param(
            m._backend_model, "resource_cap_min", ("a", "test_supply_plus")
        )
        assert po.value(param) == 0  # see defaults.yaml

        param = get_param(
            m._backend_model, "cost_resource_cap", ("monetary", "b", "test_supply_plus")
        )
        assert po.value(param) == 0  # see defaults.yaml

    def test_get_param_no_default_defined(self):
        """
        If a default is not defined, raise KeyError
        """
        m = build_model({}, "simple_supply,two_hours,investment_costs")
        m.run()
        with pytest.raises(KeyError):
            get_param(
                m._backend_model,
                "random_param",
                ("b", "test_demand_elec", m._backend_model.timesteps[1]),
            )
            get_param(m._backend_model, "random_param", ("b", "test_supply_elec"))


class TestGetDomain:
    @pytest.mark.parametrize(
        "var, domain",
        (
            ("energy_cap_max", "NonNegativeReals"),
            ("resource", "Reals"),
            ("cost_energy_cap", "Reals"),
            ("force_resource", "NonNegativeReals"),
            ("name", "Any"),
        ),
    )
    def test_dtypes(self, model, var, domain):
        assert get_domain(model._model_data[var]) == domain


class TestInvalid:
    def test_invalid(self):
        pyomo_model = po.ConcreteModel()
        pyomo_model.new_set = po.Set(initialize=["a", "b"])
        pyomo_model.new_param = po.Param(
            pyomo_model.new_set,
            initialize={"a": 1},
            mutable=True,
            within=po.NonNegativeReals,
        )

        assert invalid(pyomo_model.new_param["a"]) is False
        assert invalid(pyomo_model.new_param["b"]) is True
the-stack_106_30744
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service class to control all the operations related to Payment."""

from datetime import datetime
from typing import Any, Dict, Tuple

from flask import current_app

from pay_api.exceptions import BusinessException
from pay_api.factory.payment_system_factory import PaymentSystemFactory
from pay_api.utils.enums import Status
from pay_api.utils.errors import Error

from .base_payment_system import PaymentSystemService
from .fee_schedule import FeeSchedule
from .invoice import Invoice
from .payment import Payment
from .payment_account import PaymentAccount
from .payment_line_item import PaymentLineItem
from .payment_transaction import PaymentTransaction


class PaymentService:  # pylint: disable=too-few-public-methods
    """Service to manage Payment related operations."""

    @classmethod
    def create_payment(cls, payment_request: Tuple[Dict[str, Any]], current_user: str = None):  # pylint: disable=too-many-locals, too-many-statements
        """Create payment related records.

        Does the following;
        1. Calculate the fees based on the filing types received.
        2. Check if the payment account exists,
            2.1 If yes, use the one from database.
            2.2 Else create one in payment system and update database.
        3. Create payment record in database and flush.
        4. Create invoice record in database and flush.
        5. Create payment line items in database and flush.
        6. Create invoice in payment system;
            6.1 If successful, update the invoice table with references from payment system.
                6.1.1 If that update fails, adjust the invoice to zero and roll back the transaction.
            6.2 If it fails, roll back the transaction.
        """
        current_app.logger.debug('<create_payment')
        payment_info = payment_request.get('paymentInfo')
        business_info = payment_request.get('businessInfo')
        contact_info = business_info.get('contactInfo')
        filing_info = payment_request.get('filingInfo')

        current_app.logger.debug('Creating PaymentSystemService impl')
        pay_service: PaymentSystemService = PaymentSystemFactory.create(
            payment_info.get('methodOfPayment', None), business_info.get('corpType', None)
        )

        current_app.logger.debug('Calculate the fees')
        # Calculate the fees
        fees = []
        for filing_type_info in filing_info.get('filingTypes'):
            current_app.logger.debug('Getting fees for {} '.format(filing_type_info.get('filingTypeCode')))
            fee: FeeSchedule = FeeSchedule.find_by_corp_type_and_filing_type(
                corp_type=business_info.get('corpType', None),
                filing_type_code=filing_type_info.get('filingTypeCode', None),
                valid_date=filing_info.get('date', None),
                jurisdiction=None,
                priority=filing_info.get('priority', None),
            )
            if filing_type_info.get('filingDescription'):
                fee.description = filing_type_info.get('filingDescription')
            fees.append(fee)

        current_app.logger.debug('Check if payment account exists')
        payment_account: PaymentAccount = PaymentAccount.find_account(
            business_info.get('businessIdentifier', None),
            business_info.get('corpType', None),
            pay_service.get_payment_system_code(),
        )
        if not payment_account.id:
            current_app.logger.debug('No payment account, creating new')
            party_number, account_number, site_number = pay_service.create_account(
                business_info.get('businessName'), contact_info
            )
            payment_account = PaymentAccount.create(
                business_info, (account_number, party_number, site_number), pay_service.get_payment_system_code()
            )

        current_app.logger.debug('Creating payment record for account : {}'.format(payment_account.id))

        payment: Payment = None
        pay_system_invoice: Dict[str, any] = None

        try:
            payment: Payment = Payment.create(payment_info, current_user, pay_service.get_payment_system_code())
            current_app.logger.debug(payment)

            current_app.logger.debug('Creating Invoice record for payment {}'.format(payment.id))
            invoice = Invoice.create(payment_account, payment.id, fees, current_user)

            line_items = []
            for fee in fees:
                current_app.logger.debug('Creating line items')
                line_items.append(PaymentLineItem.create(invoice.id, fee))
            current_app.logger.debug('Handing off to payment system to create invoice')
            pay_system_invoice = pay_service.create_invoice(payment_account, line_items, invoice.id)

            current_app.logger.debug('Updating invoice record')
            invoice = Invoice.find_by_id(invoice.id, skip_auth_check=True)
            invoice.invoice_status_code = Status.CREATED.value
            invoice.reference_number = pay_system_invoice.get('reference_number', None)
            invoice.invoice_number = pay_system_invoice.get('invoice_number', None)
            invoice.save()
            payment.commit()
            payment = Payment.find_by_id(payment.id, skip_auth_check=True)
        except Exception as e:
            current_app.logger.error('Rolling back as error occurred!')
            current_app.logger.error(e)
            if payment:
                payment.rollback()
            if pay_system_invoice:
                pay_service.cancel_invoice(
                    (payment_account.party_number, payment_account.account_number, payment_account.site_number),
                    pay_system_invoice.get('invoice_number'),
                )
            raise

        current_app.logger.debug('>create_payment')
        return payment.asdict()

    @classmethod
    def get_payment(cls, payment_id, jwt):
        """Get payment related records."""
        try:
            payment: Payment = Payment.find_by_id(payment_id, jwt=jwt)
            if not payment.id:
                raise BusinessException(Error.PAY005)

            return payment.asdict()
        except Exception as e:
            current_app.logger.debug('Error on get payment {}', e)
            raise

    @classmethod
    def update_payment(cls, payment_id: int, payment_request: Tuple[Dict[str, Any]], current_user: str = None):  # pylint: disable=too-many-locals,too-many-statements
        """Update payment related records.

        Does the following;
        1. Calculate the fees based on the filing types received.
        2. Check if the payment account exists,
            2.1 If yes, use the one from database.
            2.2 Else create one in payment system and update database.
        3. Check PayBC invoice status
            3.1 If payment completed, do not update the payment,
            3.2 Else continue the process.
        4. Get invoice record in database.
        5. Invalidate old payment line items and create new payment line items in database and flush.
        6. Update invoice in payment system;
            6.1 If successful, update the invoice table with references from payment system.
                6.1.1 If that update fails, adjust the invoice to zero and roll back the transaction.
            6.2 If it fails, roll back the transaction.
        7. Update payment record in database and flush.
        """
        current_app.logger.debug('<update_payment')
        payment_info = payment_request.get('paymentInfo')
        business_info = payment_request.get('businessInfo')
        filing_info = payment_request.get('filingInfo')

        current_app.logger.debug('Creating PaymentSystemService impl')
        pay_service: PaymentSystemService = PaymentSystemFactory.create(
            payment_info.get('methodOfPayment', None), business_info.get('corpType', None)
        )

        current_app.logger.debug('Calculate the fees')
        # Calculate the fees
        fees = []
        for filing_type_info in filing_info.get('filingTypes'):
            current_app.logger.debug('Getting fees for {} '.format(filing_type_info.get('filingTypeCode')))
            fee: FeeSchedule = FeeSchedule.find_by_corp_type_and_filing_type(
                corp_type=business_info.get('corpType', None),
                filing_type_code=filing_type_info.get('filingTypeCode', None),
                valid_date=filing_info.get('date', None),
                jurisdiction=None,
                priority=filing_info.get('priority', None),
            )
            if filing_type_info.get('filingDescription'):
                fee.description = filing_type_info.get('filingDescription')
            fees.append(fee)

        current_app.logger.debug('Check if payment account exists')

        payment: Payment = None

        try:
            # get existing payment transaction
            transaction: PaymentTransaction = PaymentTransaction.find_active_by_payment_id(payment_id)
            current_app.logger.debug(transaction)
            if transaction:
                # check existing payment status in PayBC;
                PaymentTransaction.update_transaction(payment_id, transaction.id, None, skip_auth_check=True)

            # update transaction function will update the status from PayBC
            payment: Payment = Payment.find_by_id(payment_id, skip_auth_check=True)
            current_app.logger.debug(payment)
            if payment.payment_status_code == Status.COMPLETED.value:
                raise BusinessException(Error.PAY010)
            if payment.payment_status_code == Status.CANCELLED.value:
                raise BusinessException(Error.PAY011)

            current_app.logger.debug('Updating Invoice record for payment {}'.format(payment.id))
            invoices = payment.invoices
            for invoice in invoices:
                if invoice.invoice_status_code in (Status.DRAFT.value, Status.CREATED.value, Status.PARTIAL.value):
                    payment_line_items = invoice.payment_line_items

                    # Invalidate active payment line items
                    for payment_line_item in payment_line_items:
                        if payment_line_item.line_item_status_code != Status.CANCELLED.value:
                            payment_line_item.line_item_status_code = Status.CANCELLED.value
                            payment_line_item.save()

                    # add new payment line item(s)
                    line_items = []
                    for fee in fees:
                        current_app.logger.debug('Creating line items')
                        line_items.append(PaymentLineItem.create(invoice.id, fee))
                    current_app.logger.debug('Handing off to payment system to update invoice')

                    payment_account: PaymentAccount = PaymentAccount.find_by_id(invoice.account_id)

                    # update invoice
                    pay_service.update_invoice(
                        (payment_account.party_number, payment_account.account_number, payment_account.site_number),
                        invoice.invoice_number,
                    )
                    current_app.logger.debug('Updating invoice record')
                    invoice = Invoice.find_by_id(invoice.id, skip_auth_check=True)
                    invoice.updated_on = datetime.now()
                    invoice.updated_by = current_user
                    invoice.total = sum(fee.total for fee in fees)
                    invoice.save()

            payment.updated_on = datetime.now()
            payment.updated_by = current_user
            payment.save()
            payment.commit()

            # return payment with updated contents
            payment = Payment.find_by_id(payment.id, skip_auth_check=True)
        except Exception as e:
            current_app.logger.error('Rolling back as error occurred!')
            current_app.logger.error(e)
            if payment:
                payment.rollback()
            raise

        current_app.logger.debug('>update_payment')
        return payment.asdict()
the-stack_106_30747
""" Replaces values in multiple dataframes based on Pandas `replace` method. """ from tasrif.processing_pipeline import PandasOperator from tasrif.processing_pipeline.validators import InputsAreDataFramesValidatorMixin class ReplaceOperator(InputsAreDataFramesValidatorMixin, PandasOperator): """Replaces a value by another on the datasets based on Pandas `replace` method. Examples -------- >>> df = pd.DataFrame({'id': [1, 2, 3], 'colors': ['red', 'white', 'blue'], "importance": [1, 3, 2]}) >>> df = ReplaceOperator(to_replace="red", value="green").process(df)[0] >>> df id colors importance 1 green 1 3 blue 2 2 white 3 """ def __init__(self, **kwargs): """Replaces values in the datasets using the Pandas function `replace`. Args: **kwargs: key word arguments passed to pandas `DataFrame.replace` method """ super().__init__(kwargs) self.kwargs = kwargs def _process(self, *data_frames): """Replaces values in multiple datasets using the Pandas function `replace`. Args: data_frames (list of pd.DataFrame): Variable number of pandas dataframes to be processed Returns: data_frames (list of pd.DataFrame): Resulting dataframes after applying the replace function. """ # Gets one single processed = [] for data_frame in data_frames: data_frame = data_frame.replace(**self.kwargs) processed.append(data_frame) return processed
the-stack_106_30748
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import requests

if __name__ == '__main__':
    from flask import Flask, jsonify

    app = Flask(__name__)

    @app.route('/users', methods=['POST', 'GET'])
    def application():
        requests.get('http://provider:9091/insert_many', timeout=5)
        requests.get('http://provider:9091/find_one', timeout=5)
        res = requests.get('http://provider:9091/delete_one', timeout=5)
        return jsonify(res.json())

    PORT = 9090
    app.run(host='0.0.0.0', port=PORT, debug=True)
the-stack_106_30749
#!/usr/bin/python3

import os, sys, time, pathlib
import filelist as fl
from datetime import datetime
from options import sizeonly, ignoretimes, update, ignore_existing
from message import send, receive, verbose
from sender import pickling, cleaner


def get_lastname(p):
    return os.path.normpath(os.path.basename(p))


def get_common(p):
    return pathlib.Path(*(pathlib.Path(p)).parts[1:])


def search_in_list(e, L):
    for x in L:
        if get_lastname(e) == get_lastname(x[0]):
            #print(get_common(e), get_common(x[0]))
            return True
    return False


def compare(SRC_LIST, DEST_LIST):
    GEN_LIST = []
    try:
        for S in SRC_LIST:
            for D in DEST_LIST:
                #print(f"comparing {get_lastname(S[0])} to {get_lastname(D[0])}")
                if os.path.isdir(S[0]) and S not in GEN_LIST:
                    GEN_LIST.append(S)
                if get_common(cleaner(S[0])) == get_common(cleaner(D[0])):
                    if str(get_lastname(S[0])) == str(get_lastname(D[0])):
                        if os.path.isfile(S[0]):
                            if ignore_existing():
                                if verbose() > 1:
                                    print(f"gen ~ skipping {get_lastname(D[0])} (existing)")
                            elif sizeonly():
                                if S[1] == D[1]:
                                    if verbose() > 1:
                                        print(f"gen ~ skipping {get_lastname(D[0])} (size-only)")
                            elif update():
                                date_source = datetime.strptime(S[2], "%Y-%m-%d %H:%M:%S")
                                date_dest = datetime.strptime(D[2], "%Y-%m-%d %H:%M:%S")
                                if search_in_list(S[0], DEST_LIST) and date_dest > date_source:
                                    print(f"gen ~ skipping {get_lastname(S[0])} (update)")
                elif not(search_in_list(S[0], GEN_LIST)) and not(search_in_list(S[0], DEST_LIST)):
                    GEN_LIST.append(S)
    except:
        print("gen ~ error while comparing")
    return GEN_LIST


def main(L1, L2, rfd, wfd):
    try:
        S_DIRS = L1[0]
        S_FILES = L1[1]
        S_LINKS = L1[2]
        D_DIRS = L2[0]
        D_FILES = L2[1]
        D_LINKS = L2[2]

        GEN_DIRS = compare(S_DIRS, D_DIRS)
        if len(D_FILES) == 0:
            GEN_FILES = S_FILES
        else:
            GEN_FILES = compare(S_FILES, D_FILES)
        GEN_LINKS = compare(S_LINKS, D_LINKS)

        GEN_LIST = [GEN_DIRS, GEN_FILES, GEN_LINKS]
        content = pickling(GEN_LIST)
        while content:
            sent = send('rqst', wfd, content)
            if verbose() > 1:
                print(f"gen ~ {sent} bytes sent")
            content = content[sent:]

        text = pickling("Generator closed")
        send('genQ', wfd, text)
    except Exception as e:
        send('erro', wfd, pickling(e))
the-stack_106_30751
'''https://practice.geeksforgeeks.org/problems/count-bst-nodes-that-lie-in-a-given-range/1

Count BST nodes that lie in a given range
Medium | Accuracy: 50.5% | Submissions: 52649 | Points: 4

Given a Binary Search Tree (BST) and a range l-h (inclusive), count the number of nodes
in the BST that lie in the given range.
The values smaller than root go to the left side.
The values greater than or equal to the root go to the right side.

Example 1:
Input:
        10
       /  \
      5    50
     /    /  \
    1    40  100
l = 5, h = 45
Output: 3
Explanation: 5, 10 and 40 are the nodes in the range.

Example 2:
Input:
       5
      / \
     4   6
    /     \
   3       7
l = 2, h = 8
Output: 5
Explanation: All the nodes are in the given range.

Your Task:
This is a function problem. You don't have to take input. You are required to complete the
function getCountOfNode() that takes root, l, h as parameters and returns the count.

Expected Time Complexity: O(N)
Expected Auxiliary Space: O(Height of the BST).

Constraints:
1 <= Number of nodes <= 100
1 <= l < h < 10^3
'''

# User function Template for python3

# Function to count number of nodes in BST that lie in the given range.
from collections import deque


def getCount(root, l, h):
    # Your code here
    if root:
        if l <= root.data and root.data <= h:
            return getCount(root.left, l, h) + 1 + getCount(root.right, l, h)
        else:
            return getCount(root.left, l, h) + getCount(root.right, l, h)
    return 0


# {
# Driver Code Starts
# Initial Template for Python 3

# Tree Node
class Node:
    def __init__(self, val):
        self.right = None
        self.data = val
        self.left = None


# Function to Build Tree
def buildTree(s):
    # Corner Case
    if len(s) == 0 or s[0] == "N":
        return None

    # Creating list of strings from input
    # string after splitting by space
    ip = list(map(str, s.split()))

    # Create the root of the tree
    root = Node(int(ip[0]))
    size = 0
    q = deque()

    # Push the root to the queue
    q.append(root)
    size = size + 1

    # Starting from the second element
    i = 1
    while size > 0 and i < len(ip):
        # Get and remove the front of the queue
        currNode = q[0]
        q.popleft()
        size = size - 1

        # Get the current node's value from the string
        currVal = ip[i]

        # If the left child is not null
        if currVal != "N":
            # Create the left child for the current node
            currNode.left = Node(int(currVal))
            # Push it to the queue
            q.append(currNode.left)
            size = size + 1

        # For the right child
        i = i + 1
        if i >= len(ip):
            break
        currVal = ip[i]

        # If the right child is not null
        if currVal != "N":
            # Create the right child for the current node
            currNode.right = Node(int(currVal))
            # Push it to the queue
            q.append(currNode.right)
            size = size + 1
        i = i + 1
    return root


if __name__ == "__main__":
    t = int(input())
    for _ in range(0, t):
        s = input()
        root = buildTree(s)
        l, r = map(int, input().split())
        print(getCount(root, l, r))
# } Driver Code Ends
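
# --- Hypothetical stand-alone check added by the editor; not part of the original solution. ---
# It rebuilds the tree from Example 1 with the buildTree helper above (level-order string,
# "N" marking missing children) and counts nodes in [5, 45] without going through stdin.
# Left commented out so it does not interfere with the stdin-driven driver code above.
# root = buildTree("10 5 50 1 N 40 100")
# print(getCount(root, 5, 45))   # expected output: 3 (nodes 5, 10 and 40)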
the-stack_106_30752
import io
import pickle

import lmdb
import torch
from PIL import Image
from torch.utils.data import Dataset


def pickle_reader(byte_str):
    return pickle.loads(byte_str)


def torch_reader(byte_str):
    return torch.load(io.BytesIO(byte_str), map_location=lambda storage, loc: storage)


def raw_reader(byte_str):
    return byte_str


def str_reader(byte_str):
    return byte_str.decode("utf-8")


class LMDBReader:
    def __init__(
        self, path, reader="torch", map_size=1024 ** 4, max_readers=126, lazy=True
    ):
        self.path = path
        self.map_size = map_size
        self.max_readers = max_readers

        self.env = None
        self.length = None

        self.reader = self.get_reader(reader)

    def open(self):
        self.env = lmdb.open(
            self.path,
            self.map_size,
            readonly=True,
            create=False,
            readahead=False,
            lock=False,
            max_readers=self.max_readers,
        )

        if not self.env:
            raise IOError(f"Cannot open lmdb dataset {self.path}")

        try:
            self.length = int(self.get(b"length", "str"))
        except KeyError:
            self.length = 0

    def get_reader(self, reader):
        if isinstance(reader, str):
            read_fn = {
                "pickle": pickle_reader,
                "torch": torch_reader,
                "raw": raw_reader,
                "str": str_reader,
            }[reader]
        elif callable(reader):
            read_fn = reader
        else:
            raise ValueError(
                'reader should be "pickle", "torch", "raw", "str" or callable'
            )

        return read_fn

    def get(self, key, reader=None):
        if self.env is None:
            self.open()

        if reader is not None:
            read_fn = self.get_reader(reader)
        else:
            read_fn = self.reader

        with self.env.begin(write=False) as txn:
            value = txn.get(key)

        if value is None:
            raise KeyError(f"lmdb dataset does not have key {key}")

        return read_fn(value)

    def __len__(self):
        if self.length is None:
            self.open()
            self.close()

        return self.length

    def __iter__(self):
        i = 0
        while i < self.length:
            yield self.__getitem__(i)
            i += 1

    def __getitem__(self, index):
        return self.get(str(index).encode("utf-8"))

    def close(self):
        if self.env is not None:
            self.env.close()
            self.env = None

    def __del__(self):
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()


class LMDBDataset(Dataset):
    def __init__(self, path, transform, decode=True):
        self.data = LMDBReader(path, reader="raw")
        self.transform = transform
        self.decode = decode

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        img = self.data[index]
        class_id = int(img[:4].decode("utf-8"))

        if self.decode:
            buffer = io.BytesIO(img[4:])
            img = Image.open(buffer).convert("RGB")
            img = self.transform(img)
        else:
            img = img[4:]

        return img, class_id
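
# --- Hypothetical usage sketch added by the editor; not part of the original file. ---
# It assumes an LMDB file ("train.lmdb" is a made-up path) whose records follow the
# layout LMDBDataset expects: a 4-byte class-id prefix, an encoded image body, and a
# "length" key. Left commented out because the file is not provided here.
# from torchvision import transforms
# from torch.utils.data import DataLoader
#
# transform = transforms.Compose([transforms.Resize(224), transforms.ToTensor()])
# dataset = LMDBDataset("train.lmdb", transform=transform, decode=True)
# loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=4)
# for images, class_ids in loader:
#     pass  # training step goes here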
the-stack_106_30753
from __future__ import absolute_import from __future__ import division from __future__ import print_function import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import init import csv import pickle as pkl class SkipGram(nn.Module): def __init__(self, vocab_size, embedding_size): super(SkipGram, self).__init__() self.u_embeddings = nn.Embedding( vocab_size, embedding_size, sparse=True) self.v_embeddings = nn.Embedding( vocab_size, embedding_size, sparse=True) initrange = 1.0 / embedding_size init.uniform_(self.u_embeddings.weight.data, -initrange, initrange) init.constant_(self.v_embeddings.weight.data, 0) def forward(self, focus, pos_context, neg_context, batch_size): embed_u = self.u_embeddings(focus) embed_v = self.v_embeddings(pos_context) score = torch.mul(embed_u, embed_v) score = torch.sum(score, dim=1) log_target = F.logsigmoid(score).squeeze() neg_embed_v = self.v_embeddings(neg_context) neg_score = torch.bmm(neg_embed_v, embed_u.unsqueeze(2)).squeeze() neg_score = torch.sum(neg_score, dim=1) sum_log_sampled = F.logsigmoid(-1*neg_score).squeeze() loss = log_target + sum_log_sampled return -1*loss.sum()/batch_size def save_embedding(self, ix_to_word, file_name='saved_embeddings/skipgram_imp'): with open(file_name, 'wb') as embed_file: embedding = {} embedding_matrix = self.u_embeddings.weight.data for word_index in range(len(embedding_matrix)): word = ix_to_word[word_index] embedding[word] = list(embedding_matrix[word_index].numpy()) pkl.dump(embedding, embed_file)
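# --- Editor-added smoke-test sketch ---
# The vocabulary size, embedding size and random index tensors below are illustrative
# values only; they are not taken from any training script.
def _example_skipgram():
    vocab_size, embedding_size, batch_size, n_negatives = 100, 16, 4, 5
    model = SkipGram(vocab_size, embedding_size)
    focus = torch.randint(0, vocab_size, (batch_size,))
    pos_context = torch.randint(0, vocab_size, (batch_size,))
    neg_context = torch.randint(0, vocab_size, (batch_size, n_negatives))
    loss = model(focus, pos_context, neg_context, batch_size)
    return loss.item()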
the-stack_106_30755
import os import cv2 import numpy as np from skimage import transform import torch import torch.nn.functional as NF def parse_batch(batch, device): batch['image1'] = batch['image1'].to(device, dtype=torch.float) batch['img1_info'] = batch['img1_info'].to(device, dtype=torch.float) batch['homo12'] = batch['homo12'].to(device, dtype=torch.float) batch['image2'] = batch['image2'].to(device, dtype=torch.float) batch['img2_info'] = batch['img2_info'].to(device, dtype=torch.float) batch['homo21'] = batch['homo21'].to(device, dtype=torch.float) batch['img1_raw'] = batch['img1_raw'].to(device, dtype=torch.float) batch['img2_raw'] = batch['img2_raw'].to(device, dtype=torch.float) return batch def parse_output(output): output['dense_features1'] = output['dense_features1'].squeeze(0) output['scores1'] = output['scores1'].squeeze(0) output['dense_features2'] = output['dense_features2'].squeeze(0) output['scores2'] = output['scores2'].squeeze(0) return output def parse_batch_test(batch, device): batch['image1'] = batch['image1'].to(device, dtype=torch.float) batch['homo12'] = batch['homo12'].to(device, dtype=torch.float) batch['image2'] = batch['image2'].to(device, dtype=torch.float) batch['homo21'] = batch['homo21'].to(device, dtype=torch.float) return batch def L2Norm(input, dim=-1): input = input / (torch.norm(input, p=2, dim=dim, keepdim=True) + 1e-6) return input def to_npy(t): t = (t * 255).permute(1, 2, 0).cpu().detach().numpy() return t def gen_identity_orient(bs, device): orient = torch.FloatTensor([1, 0]) orient = orient.repeat(bs, 1).reshape(bs, 2).to(device) return orient def sample_patches(img, patch_size): c, h, w = img.shape # Tensor format: C, H, W assert(h >= patch_size and w >= patch_size) # print('img size', c, h, w) # Generate patch offset step = int(0.75 * patch_size) # 96 x = [i for i in range(0, w - patch_size, step)] y = [i for i in range(0, h - patch_size, step)] x.append(w - patch_size) # Add last patch y.append(h - patch_size) patch_n = len(x) * len(y) y_offset, x_offset = torch.meshgrid([ torch.IntTensor(y), torch.IntTensor(x) ]) x_offset = x_offset.contiguous().view(-1).unsqueeze(1) y_offset = y_offset.contiguous().view(-1).unsqueeze(1) # Generate patch coordinates y_c, x_c = torch.meshgrid([ torch.linspace(0, patch_size-1, patch_size), torch.linspace(0, patch_size-1, patch_size) ]) x_c = x_c.contiguous().view(-1).to(torch.int) # (patch_size * patch_size) x_c = x_c.repeat(patch_n).view(patch_n, -1) # (patch_n, patch_size * patch_size) y_c = y_c.contiguous().view(-1).to(torch.int) y_c = y_c.repeat(patch_n).view(patch_n, -1) # print('_c shape', x_c.shape) # print('_offset shape', y_offset.shape) x = x_offset + x_c # (patch_n, patch_size * patch_size) y = y_offset + y_c pixel_idx = (y * w + x).view(-1).to(torch.long) # (patch_n * patch_size * patch_size) # print('shape', x.shape, y.shape) img_flat = img.view(c, -1) # (C, H * W) patches_0 = img_flat[0].gather(0, pixel_idx).reshape(patch_n, patch_size, patch_size) patches_1 = img_flat[1].gather(0, pixel_idx).reshape(patch_n, patch_size, patch_size) patches_2 = img_flat[2].gather(0, pixel_idx).reshape(patch_n, patch_size, patch_size) patches = torch.stack([patches_0, patches_1, patches_2], dim=0) patches = patches.permute(1, 0, 2, 3) # print('patches', patches.shape) return patches def sample_patches_orient(img, patch_size, orients, device): c, h, w = img.shape # Tensor format: C, H, W assert(h >= patch_size and w >= patch_size) # print('img size', c, h, w) patch_n = orients.shape[0] # Generate patch offset step = int(0.75 * 
patch_size) # 96 patch_size_half = patch_size // 2 # 64 x = [i + patch_size_half for i in range(0, w - patch_size, step)] y = [i + patch_size_half for i in range(0, h - patch_size, step)] x.append(w - patch_size_half) # Add last patch y.append(h - patch_size_half) patch_n = len(x) * len(y) y_offset, x_offset = torch.meshgrid([ torch.FloatTensor(y).to(device=device), torch.FloatTensor(x).to(device=device) ]) x_offset = x_offset.contiguous().view(-1).unsqueeze(1) y_offset = y_offset.contiguous().view(-1).unsqueeze(1) # Generate patch coordinates y_c, x_c = torch.meshgrid([ torch.linspace(-patch_size_half, patch_size_half-1, patch_size, device=device), torch.linspace(-patch_size_half, patch_size_half-1, patch_size, device=device) ]) x_c = x_c.contiguous().view(-1) # (patch_size * patch_size) y_c = y_c.contiguous().view(-1) # (patch_size * patch_size) one_t = x_c.new_full(x_c.size(), fill_value=1) grid = torch.stack((x_c, y_c, one_t)) grid = grid.view(-1).repeat(patch_n).view(patch_n, 3, -1) # Set rotate matrix cos = orients[:, 0].unsqueeze(1) sin = orients[:, 1].unsqueeze(1) zeros = cos.new_full(cos.size(), fill_value=0) ones = cos.new_full(cos.size(), fill_value=1) rot_mat = torch.cat((cos, -sin, zeros, sin, cos, zeros, zeros, zeros, ones), dim=1) rot_mat = rot_mat.view(-1, 3, 3) # Apply rotation matrix # print('rot_mat shape:', rot_mat.shape, 'grid shape:', grid.shape) # print('rot_mat', rot_mat) # print('grid', grid) grid = torch.matmul(rot_mat, grid) x = (x_offset + grid[:, 0, :]).view(-1) # (patch_n * patch_size * patch_size) y = (y_offset + grid[:, 1, :]).view(-1) # Setup interpolation x0, y0 = x.floor(), y.floor() x1, y1 = x0 + 1, y0 + 1 x0 = x0.clamp(min=0, max=w-1) x1 = x1.clamp(min=0, max=w-1) y0 = y0.clamp(min=0, max=h-1) y1 = y1.clamp(min=0, max=h-1) # Interpolation weights # print('interpolation:', x0.shape, x1.shape, x.shape) w_a = (x1 - x) * (y1 - y) w_b = (x1 - x) * (y - y0) w_c = (x - x0) * (y1 - y) w_d = (x - x0) * (y - y0) x0, x1, y0, y1 = x0.long(), x1.long(), y0.long(), y1.long() base_y0 = y0 * w base_y1 = y1 * w # All idx are in (patch_n * patch_size * patch_size) idx_a = base_y0 + x0 # Left-top pixel idx_b = base_y1 + x0 # Left-bottom pixel idx_c = base_y0 + x1 # Right-top pixel idx_d = base_y1 + x1 # Right-bottom pixel img_flat = img.view(c, -1) # (C, H * W) patches = torch.zeros((c, patch_n * patch_size * patch_size)).to(device=device) for i in range(c): img_a = img_flat[i].gather(0, idx_a) img_b = img_flat[i].gather(0, idx_b) img_c = img_flat[i].gather(0, idx_c) img_d = img_flat[i].gather(0, idx_d) patches[i] = w_a * img_a + w_b * img_b + w_c * img_c + w_d * img_d patches = torch.stack([patches[0], patches[1], patches[2]], dim=0) patches = patches.reshape(c, patch_n, patch_size, patch_size).permute(1, 0, 2, 3) return patches def show_patches(patches, mean=None, std=None, prefix=''): out_folder = 'tmp/patches' if not os.path.exists(out_folder): os.makedirs(out_folder) print('Show Patches:', patches.shape, 'Save to', out_folder) print('Norm mean:', mean, 'Norm std:', std) bs, c, h, w = patches.shape mean = torch.FloatTensor(mean).reshape(3, 1, 1).to(patches.device) std = torch.FloatTensor(std).reshape(3, 1, 1).to(patches.device) if prefix != '': prefix += '_' for i in range(bs): img = patches[i] * std + mean img = to_npy(img) out_path = os.path.join(out_folder, f'{prefix}{i}.png') cv2.imwrite(out_path, img) def warp_img(img, homo): img = img.unsqueeze(0) b, c, h, w = img.shape y_c, x_c = torch.meshgrid([ torch.arange(h), torch.arange(w) ]) x_c = 
x_c.contiguous().view(-1) # (patch_size * patch_size) y_c = y_c.contiguous().view(-1) # (patch_size * patch_size) one_t = x_c.new_full(x_c.size(), fill_value=1) grid = torch.stack((x_c, y_c, one_t)) grid = grid.view(-1).repeat(b).view(b, 3, -1) grid = grid.type_as(homo).to(homo.device) grid_w = torch.matmul(homo, grid).to(torch.float) grid_w = grid_w.permute(0, 2, 1) # (B, H*W, 3) grid_w = grid_w.div(grid_w[:, :, 2].unsqueeze(-1) + 1e-8) # (B, H*W, 3) grid_w = grid_w.view(b, h, w, -1)[:, :, :, :2] # (B, H, W, 2) grid_w[:, :, :, 0] = grid_w[:, :, :, 0].div(w - 1) * 2 - 1 grid_w[:, :, :, 1] = grid_w[:, :, :, 1].div(h - 1) * 2 - 1 img_w = NF.grid_sample(img, grid_w) # (B, C, H, W) return img_w.squeeze(0) def distance_matrix_vector(anchor, positive): """ Given batch of anchor descriptors and positive descriptors calculate distance matrix :param anchor: (B, 128) :param positive: (B, 128) :return: """ eps = 1e-8 FeatSimi_Mat = 2 - 2 * (anchor.dot(np.transpose(positive))) # [0, 4] FeatSimi_Mat = np.clip(FeatSimi_Mat, eps, 4.0) FeatSimi_Mat = np.sqrt(FeatSimi_Mat) # euc [0, 2] return FeatSimi_Mat def im_rescale(im, output_size): h, w = im.shape[:2] if isinstance(output_size, int): if h > w: new_h, new_w = output_size * h / w, output_size else: new_h, new_w = output_size, output_size * w / h else: new_h, new_w = output_size new_h, new_w = int(new_h), int(new_w) img = transform.resize(im, (new_h, new_w), mode='constant') return img, h, w, new_w / w, new_h / h
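# --- Editor-added usage sketch ---
# Illustrates the patch helpers above on a random image tensor. The image size, patch
# size and the identity orientations are made-up values, not settings from the original
# project.
def _example_patches(device="cpu"):
    img = torch.rand(3, 256, 320)                    # (C, H, W)
    patches = sample_patches(img, patch_size=128)    # -> (patch_n, 3, 128, 128)
    orients = gen_identity_orient(patches.shape[0], device)
    rotated = sample_patches_orient(img.to(device), 128, orients, device)
    return patches.shape, rotated.shape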
the-stack_106_30756
# coding:utf-8 #!/usr/bin/python # # Copyright (c) Contributors to the Open 3D Engine Project. # For complete copyright and license terms please see the LICENSE at the root of this distribution. # # SPDX-License-Identifier: Apache-2.0 OR MIT # # # -- This line is 75 characters ------------------------------------------- from __future__ import unicode_literals """ This module fullfils the maya bootstrap pattern as described in their docs https://tinyurl.com/y2aoz8es Pattern is similar to Lumberyard Editor\\Scripts\\bootstrap.py For now the proper way to initiate Maya boostrapping the DCCsi, is to use the provided env and launcher bat files. If you are developing for the DCCsi you can use this launcher to start Maya: DccScriptingInterface\Launchers\Windows\Launch_Maya_2020.bat" To Do: ATOM-5861 """ __project__ = 'DccScriptingInterface' # it is really hard to debug userSetup bootstrapping # this enables some rudimentary logging for debugging _BOOT_INFO = True # ------------------------------------------------------------------------- # built in's import os import sys import site import inspect import traceback import logging as _logging # -- DCCsi Extension Modules import azpy from azpy.constants import * from azpy.env_base import _BASE_ENVVAR_DICT from azpy.env_bool import env_bool from azpy.constants import ENVAR_DCCSI_GDEBUG from azpy.constants import ENVAR_DCCSI_DEV_MODE # -- maya imports import maya.cmds as cmds import maya.mel as mel #from pymel.all import * # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # global space _DCCSI_GDEBUG = env_bool(ENVAR_DCCSI_GDEBUG, False) _DCCSI_DEV_MODE = env_bool(ENVAR_DCCSI_DEV_MODE, False) #_DCCSI_DEV_MODE = True # force true for debugger testing _ORG_TAG = r'Amazon::Lumberyard' _APP_TAG = r'DCCsi' _TOOL_TAG = r'SDK.Maya.Scripts.userSetup' _TYPE_TAG = r'entrypoint' # bootstrap _MODULENAME = str('{0}.{1}'.format(_APP_TAG, _TOOL_TAG)) _LOGGER = azpy.initialize_logger(_MODULENAME, default_log_level=int(20)) _LOGGER.info('Initializing: {0}.'.format({_MODULENAME})) _LOGGER.info('DCCSI_GDEBUG: {0}.'.format({_DCCSI_GDEBUG})) _LOGGER.info('DCCSI_DEV_MODE: {0}.'.format({_DCCSI_DEV_MODE})) # flag to turn off setting up callbacks, until they are fully implemented # To Do: consider making it a settings option to define and enable/disable _G_LOAD_CALLBACKS = True # couple bugs, couple NOT IMPLEMENTED _LOGGER.info('DCCSI_MAYA_SET_CALLBACKS: {0}.'.format({_G_LOAD_CALLBACKS})) # early attach WingIDE debugger (can refactor to include other IDEs later) if _DCCSI_DEV_MODE: from azpy.test.entry_test import connect_wing foo = connect_wing() # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # To Do REMOVE this block and replace with dev module # debug prints, To Do: this should be moved to bootstrap config #_G_DEBUGGER = os.getenv(ENVAR_DCCSI_GDEBUGGER, "WING") #if _DCCSI_DEV_MODE: #if _G_DEBUGGER == "WING": #_LOGGER.info('{0}'.format('-' * 74)) #_LOGGER.info('Developer Debug Mode: {0}, Basic debugger: {1}'.format(_G_DEBUG, _G_DEBUGGER)) #try: #_LOGGER.info('Attempting to start basic WING debugger') #import azpy.lmbr.test #_LOGGER.info('Package Imported: azpy.test') #ouput = azpy.entry_test.main(verbose=False, #connectDebugger=True, #returnOuput=_G_DEBUG) #_LOGGER.info(ouput) #pass #except Exception as e: #_LOGGER.info("Error: azpy.test, entry_test (didn't perform)") 
#_LOGGER.info("Exception: {0}".format(e))
#pass

#elif _G_DEBUGGER == "PYCHARM":
## https://github.com/juggernate/PyCharm-Maya-Debugging
#_LOGGER.info('{0}'.format('-' * 74))
#_LOGGER.info('Developer Debug Mode: {0}, Basic debugger: {1}'.format(_G_DEBUG, _G_DEBUGGER))
#sys.path.append('C:\Program Files\JetBrains\PyCharm 2019.1.3\debug-eggs\pydevd-pycharm.egg')
#try:
#_LOGGER.info('Attempting to start basic PYCHARM debugger')
## Inside Maya Python Console (Tip: add to a shelf button for quick access)
#import pydevd
#_LOGGER.info('Package Imported: pydevd')
#pydevd.settrace('localhost', port=7720, suspend=False)
#_LOGGER.info('PYCHARM Debugger Attach Success!!!')
## To disconnect run:
## pydevd.stoptrace()
#pass
#except Exception as e:
#_LOGGER.info("Error: pydevd.settrace (didn't perform)")
#_LOGGER.info("Exception: {0}".format(e))
#pass
#else:
#pass
## -------------------------------------------------------------------------


# -------------------------------------------------------------------------
# validate access to the DCCsi and its Lib site-packages
# bootstrap site-packages by version
from azpy.constants import PATH_DCCSI_PYTHON_LIB_PATH

try:
    os.path.exists(PATH_DCCSI_PYTHON_LIB_PATH)
    site.addsitedir(PATH_DCCSI_PYTHON_LIB_PATH)
    _LOGGER.info('azpy 3rdParty site-packages: is: {0}'.format(PATH_DCCSI_PYTHON_LIB_PATH))
except Exception as e:
    _LOGGER.error('ERROR: {0}, {1}'.format(e, PATH_DCCSI_PYTHON_LIB_PATH))
    raise e

# 3rdparty
from unipath import Path
from box import Box
# -------------------------------------------------------------------------


# -------------------------------------------------------------------------
# Maya is frozen
#_MODULE_PATH = Path(__file__)
# https://tinyurl.com/y49t3zzn
# module path when frozen
_MODULE_FILEPATH = os.path.abspath(inspect.getfile(inspect.currentframe()))
_MODULE_PATH = os.path.dirname(_MODULE_FILEPATH)

if _BOOT_INFO:
    _LOGGER.debug('Boot: CWD: {}'.format(os.getcwd()))
    _LOGGER.debug('Frozen: _MODULE_FILEPATH: {}'.format(_MODULE_FILEPATH))
    _LOGGER.debug('Frozen: _MODULE_PATH: {}'.format(_MODULE_PATH))
    _LOGGER.debug('Module __name__: {}'.format(__name__))
    # root: INFO: Module __name__: __main__
    _LOGGER.info('_MODULENAME: {}'.format(_MODULENAME))

# -------------------------------------------------------------------------
# check some env var tags (fail if no, likely means no proper code access)
_STR_ERROR_ENVAR = "Envar 'key' does not exist in base_env: {0}"

_DCCSI_SDK_PATH = None
try:
    _DCCSI_SDK_PATH = _BASE_ENVVAR_DICT[ENVAR_DCCSI_SDK_PATH]
except Exception as e:
    # report the missing envar *name*; looking the value up again here would
    # simply re-raise the same KeyError
    _LOGGER.critical(_STR_ERROR_ENVAR.format(ENVAR_DCCSI_SDK_PATH))

_O3DE_PROJECT_PATH = None
try:
    _O3DE_PROJECT_PATH = _BASE_ENVVAR_DICT[ENVAR_O3DE_PROJECT_PATH]
except Exception as e:
    _LOGGER.critical(_STR_ERROR_ENVAR.format(ENVAR_O3DE_PROJECT_PATH))

# check some env var tags (fail if no, likely means no proper code access)
_O3DE_DEV = _BASE_ENVVAR_DICT[ENVAR_O3DE_DEV]
_O3DE_DCCSIG_PATH = _BASE_ENVVAR_DICT[ENVAR_DCCSIG_PATH]
_O3DE_DCCSI_LOG_PATH = _BASE_ENVVAR_DICT[ENVAR_DCCSI_LOG_PATH]
_O3DE_AZPY_PATH = _BASE_ENVVAR_DICT[ENVAR_DCCSI_AZPY_PATH]
# -------------------------------------------------------------------------


# -------------------------------------------------------------------------
# To Do: implement data driven config
# Currently not used, but will be where we store the ordered dict
# which is parsed from the project bootstrapping config files.
_G_app_config = {}

# global scope maya callbacks container
_G_callbacks = Box(box_dots=True)  # global scope container

# used to store fixPaths in the global scope
_fix_paths = None
# -------------------------------------------------------------------------


# -------------------------------------------------------------------------
# add appropriate common tools paths to the maya environment variables
def startup():
    """Early startup execution before mayautils.executeDeferred().
    Some things like UI and plugins should be deferred to avoid failure."""
    _LOGGER.info('startup() fired')

    # get known paths
    _KNOWN_PATHS = site._init_pathinfo()

    if os.path.isdir(_DCCSI_SDK_PATH):
        site.addsitedir(_DCCSI_SDK_PATH, _KNOWN_PATHS)
        try:
            import azpy.test
            _LOGGER.info('SUCCESS, import azpy.test')
        except Exception as e:
            _LOGGER.warning('startup(), could not import azpy.test')

    _LOGGER.info('startup(), COMPLETE')
    return 0
# -------------------------------------------------------------------------


# -------------------------------------------------------------------------
# verify Shared\Python exists and add it as a site dir. Begin imports and config.
def post_startup():
    """Allows for a deferred execution startup sequence"""
    _LOGGER.info('post_startup() fired')

    # plugins, To Do: these should be moved to bootstrapping config
    try:
        maya.cmds.loadPlugin("dx11Shader")
    except Exception as e:
        _LOGGER.error(e)  # not a hard failure

    # Lumberyard DCCsi environment ready or error out.
    try:
        import azpy.maya
        _LOGGER.info('Python module imported: azpy.maya')
    except Exception as e:
        _LOGGER.error(e)
        _LOGGER.error(traceback.print_exc())
        return 1

    # Dccsi azpy maya ready or error out.
    try:
        azpy.maya.init()
        _LOGGER.info('SUCCESS, azpy.maya.init(), code accessible.')
    except Exception as e:
        _LOGGER.error(e)
        _LOGGER.error(traceback.print_exc())
        return 1

    # callbacks, To Do: these should also be moved to the bootstrapping config
    # Deferred startup after the UI is running.
    _G_CALLBACKS = Box(box_dots=True)  # this just ensures a global scope container
    if _G_LOAD_CALLBACKS:
        from set_callbacks import _G_CALLBACKS
        # ^ need to hold on to this as the install repopulates the set

    # this ensures the fixPaths callback is loaded
    # even when the other global callbacks are disabled
    from set_callbacks import install_fix_paths
    install_fix_paths()

    # set the project workspace
    #_O3DE_PROJECT_PATH = _BASE_ENVVAR_DICT[ENVAR_O3DE_PROJECT_PATH]
    _project_workspace = os.path.join(_O3DE_PROJECT_PATH, TAG_MAYA_WORKSPACE)
    if os.path.isfile(_project_workspace):
        try:
            # load workspace
            maya.cmds.workspace(_O3DE_PROJECT_PATH, openWorkspace=True)
            _LOGGER.info('Loaded workspace file: {0}'.format(_project_workspace))
            maya.cmds.workspace(_O3DE_PROJECT_PATH, update=True)
        except Exception as e:
            _LOGGER.error(e)
    else:
        _LOGGER.warning('Workspace file not found: {0}'.format(_O3DE_PROJECT_PATH))

    # Set up Lumberyard, maya default setting
    from set_defaults import set_defaults
    set_defaults()

    # Setup UI tools
    if not maya.cmds.about(batch=True):
        _LOGGER.info('Add UI dependent tools')
        # wrap in a try, because we haven't implemented it yet
        try:
            mel.eval(str(r'source "{}"'.format(TAG_O3DE_DCC_MAYA_MEL)))
        except Exception as e:
            _LOGGER.error(e)

    # manage custom menu in a sub-module
    from set_menu import set_main_menu
    set_main_menu()

    # To Do: manage custom shelf in a sub-module

    _LOGGER.info('post_startup(), COMPLETE')
    _LOGGER.info('DCCsi Bootstrap, COMPLETE')

    return 0
# -------------------------------------------------------------------------


# -------------------------------------------------------------------------
if __name__ == '__main__':
    try:
        # Early startup config.
        startup()

        # This allows deferred action post boot (after UI is active)
        from maya.utils import executeDeferred
        post = executeDeferred(post_startup)

    except Exception as e:
        traceback.print_exc()
the-stack_106_30757
# -*- coding: utf-8 -*- from __future__ import absolute_import import math from django.template import Library from django.utils import six register = Library() # The templatetag below is copied from sorl.thumbnail filesize_formats = ['k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'] filesize_long_formats = { 'k': 'kilo', 'M': 'mega', 'G': 'giga', 'T': 'tera', 'P': 'peta', 'E': 'exa', 'Z': 'zetta', 'Y': 'yotta', } def filesize(bytes, format='auto1024'): """ Returns the number of bytes in either the nearest unit or a specific unit (depending on the chosen format method). Acceptable formats are: auto1024, auto1000 convert to the nearest unit, appending the abbreviated unit name to the string (e.g. '2 KiB' or '2 kB'). auto1024 is the default format. auto1024long, auto1000long convert to the nearest multiple of 1024 or 1000, appending the correctly pluralized unit name to the string (e.g. '2 kibibytes' or '2 kilobytes'). kB, MB, GB, TB, PB, EB, ZB or YB convert to the exact unit (using multiples of 1000). KiB, MiB, GiB, TiB, PiB, EiB, ZiB or YiB convert to the exact unit (using multiples of 1024). The auto1024 and auto1000 formats return a string, appending the correct unit to the value. All other formats return the floating point value. If an invalid format is specified, the bytes are returned unchanged. """ format_len = len(format) # Check for valid format if format_len in (2, 3): if format_len == 3 and format[0] == 'K': format = 'k%s' % format[1:] if not format[-1] == 'B' or format[0] not in filesize_formats: return bytes if format_len == 3 and format[1] != 'i': return bytes elif format not in ('auto1024', 'auto1000', 'auto1024long', 'auto1000long'): return bytes # Check for valid bytes try: bytes = int(bytes) if six.PY3 else long(bytes) # NOQA - flake8/py3 reports error: long does not exists in py3 except (ValueError, TypeError): return bytes # Auto multiple of 1000 or 1024 if format.startswith('auto'): if format[4:8] == '1000': base = 1000 else: base = 1024 logarithm = bytes and math.log(bytes, base) or 0 index = min(int(logarithm) - 1, len(filesize_formats) - 1) if index >= 0: if base == 1000: bytes = bytes and bytes / math.pow(1000, index + 1) else: bytes = bytes >> (10 * (index)) bytes = bytes and bytes / 1024.0 unit = filesize_formats[index] else: # Change the base to 1000 so the unit will just output 'B' not 'iB' base = 1000 unit = '' if bytes >= 10 or ('%.1f' % bytes).endswith('.0'): bytes = '%.0f' % bytes else: bytes = '%.1f' % bytes if format.endswith('long'): unit = filesize_long_formats.get(unit, '') if base == 1024 and unit: unit = '%sbi' % unit[:2] unit = '%sbyte%s' % (unit, bytes != '1' and 's' or '') else: unit = '%s%s' % (base == 1024 and unit.upper() or unit, base == 1024 and 'iB' or 'B') return '%s %s' % (bytes, unit) if bytes == 0: return bytes base = filesize_formats.index(format[0]) + 1 # Exact multiple of 1000 if format_len == 2: return bytes / (1000.0 ** base) # Exact multiple of 1024 elif format_len == 3: bytes = bytes >> (10 * (base - 1)) return bytes / 1024.0 register.filter(filesize)
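# --- Editor-added examples ---
# A few illustrative values worked out from the docstring and the code above; they are
# editor-added checks, not part of the original sorl.thumbnail filter.
def _filesize_examples():
    assert filesize(2048) == '2 KiB'            # default 'auto1024'
    assert filesize(2048, 'auto1000') == '2 kB'
    assert filesize(2048, 'KiB') == 2.0         # exact unit -> float (multiples of 1024)
    assert filesize(2048, 'kB') == 2.048        # exact unit -> float (multiples of 1000)
    assert filesize('oops') == 'oops'           # invalid input is returned unchanged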
the-stack_106_30758
"""The module is used to convert the sklearn.svm.SVC into a Flutter/Dart model.""" import numpy as np from sklearn.svm import SVC, LinearSVC from .base import SKLiteBase class SkliteSVCClassifier(SKLiteBase): """SVC implementation.""" @property def validate_(self) -> str: """In order to check whether the classifier has been fitted.""" return "n_support_" @property def class_(self): """Used for checking the data-type of the estimator in the constructor.""" return SVC def build(self) -> dict: """Fetches and transforms all the data from the classifier. Returns ------- dict """ data = {} attributes = ["support_vectors_", "dual_coef_", "n_support_", "_intercept_"] for attr in attributes: data[attr] = getattr(self._estimator, attr).tolist() for attr in ["kernel", "coef0", "degree", "_gamma"]: data[attr] = getattr(self._estimator, attr) # pylint: disable=protected-access data["_gamma"] = float(self._estimator._gamma) data["classes_"] = self._estimator.classes_.astype( np.int32).tolist() return data class SkliteLinearSVCClassifier(SKLiteBase): """LinearSVC implementation.""" @property def class_(self): """Used for checking the data-type of the estimator in the constructor.""" return LinearSVC def build(self) -> dict: """Fetches and transforms all the data from the classifier. Returns ------- dict """ data = {} attributes = ["intercept_"] for attr in attributes: data[attr] = getattr(self._estimator, attr).tolist() data["classes_"] = self._estimator.classes_.astype(np.int32).tolist() if len(data["classes_"]) == 2: data["coef_"] = self._estimator.coef_[0].tolist() else: data["coef_"] = self._estimator.coef_.tolist() return data
the-stack_106_30759
# coding:utf-8 # flake8: noqa import json from input_adapter import InputDevAdapter from assistant.adapters.storage.storage_adapter import StorageAdapter class CRMAdapter(InputDevAdapter): def __init__(self, file_path): super(CRMAdapter, self).__init__() self.file_path = file_path self.storage_adapter = StorageAdapter() def process_input(self, **kwargs): agent_name = kwargs.get('agent_name') with open(self.file_path) as data_file: data = json.load(data_file) developer, created = self.storage_adapter.func_object( 'Developer', ['objects', 'get_or_create'] ) agent = self.storage_adapter.save( 'AgentSerializer', data={ 'developer': developer.pk, 'username': agent_name if agent_name else 'username' } ) for domain in data['domain']: domain_ser = self.storage_adapter.get_object( 'Domain', name=domain['name'] ) if not domain_ser: domain_ser = self.storage_adapter.save( 'DomainSerializer', data={'agent': agent.pk, 'name': domain['name']} ) for intent in domain['intent']: intent_ser = self.storage_adapter.get_object( 'Intent', name=intent['name'] ) if not intent_ser: intent_ser = self.storage_adapter.save( 'IntentSerializer', data={'domain': domain_ser.pk, 'name': intent['name']} ) for parameter in intent['parameters']: self.storage_adapter.save( 'ParameterSerializer', data={ 'intent': intent_ser.pk, 'name': parameter['name'], 'is_obligatory': bool(parameter['is_obligatory']), 'value': parameter['value'] } )
the-stack_106_30760
# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from searchlight.api.v1 import search from searchlight.common import wsgi class API(wsgi.Router): """WSGI router for Searchlight v1 API requests.""" def __init__(self, mapper): reject_method_resource = wsgi.Resource(wsgi.RejectMethodController()) search_catalog_resource = search.create_resource() mapper.connect('/search', controller=search_catalog_resource, action='search', conditions={'method': ['GET']}) mapper.connect('/search', controller=search_catalog_resource, action='search', conditions={'method': ['POST']}) mapper.connect('/search', controller=reject_method_resource, action='reject', allowed_methods='GET, POST', conditions={'method': ['PUT', 'DELETE', 'PATCH', 'HEAD']}) mapper.connect('/search/plugins', controller=search_catalog_resource, action='plugins_info', conditions={'method': ['GET']}) mapper.connect('/search/plugins', controller=reject_method_resource, action='reject', allowed_methods='GET', conditions={'method': ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']}) mapper.connect('/search/facets', controller=search_catalog_resource, action='facets', conditions={'method': ['GET']}), mapper.connect('/search/facets', controller=reject_method_resource, action='reject', allowed_methods='GET', conditions={'method': ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']}) super(API, self).__init__(mapper)
the-stack_106_30761
from enum import Enum from .header import MgmtHeader, MgmtGroup, MgmtOp, MgmtErr, CmdBase, RequestBase, ResponseBase import cbor import time import sys class MgmtIdImg(Enum): STATE = 0 UPLOAD = 1 FILE = 2 CORELIST = 3 CORELOAD = 4 ERASE = 5 class SlotDescription(object): _flags = ('confirmed', 'pending', 'active', 'permanent') def __init__(self, slot_obj): try: self.slot = slot_obj['slot'] self.version = slot_obj['version'] self.hash = slot_obj['hash'].hex() self.bootable = slot_obj['bootable'] self.confirmed = slot_obj['confirmed'] self.pending = slot_obj['pending'] self.active = slot_obj['active'] self.permanent = slot_obj['permanent'] except KeyError as e: raise KeyError('key expected in slot: {}'.format(str(e))) from None def hash_str(self): return self.hash def __str__(self): flags = ','.join([ flag for flag in self._flags if getattr(self, flag) ]) return 'slot:{} version:{} hash:{} bootable:{} flags:{}'.format( self.slot, self.version, self.hash, 'true' if self.bootable else 'false', flags ) def __repr__(self): return '{}({})'.format(self.__class__.__name__, str(self)) class ImgDescription(object): def __init__(self, obj): self.slots = [] if not 'images' in obj: raise ValueError('key \'images\' expected') images = obj['images'] if not isinstance(images, list): raise ValueError('list expected for key \'images\'') for slot in images: self.slots.append(SlotDescription(slot)) def active_slot(self): for s in self.slots: if s.active: return s return None def confirmed_slot(self): for s in self.slots: if s.confirmed: return s return None def pending_slot(self): for s in self.slots: if s.pending: return s return None def other_slot(self): for s in self.slots: if not s.active: return s return None def state_sane(self): for s in self.slots: if (s.active and not s.confirmed) or (not s.active and s.confirmed) or s.pending: return False return True def __str__(self): return str(self.slots) def __repr__(self): return '{}({})'.format(self.__class__.__name__, str(self)) class CmdImg(CmdBase): _group = MgmtGroup.IMAGE _group.registerGroupIDs(MgmtIdImg) @staticmethod def setState(new_state, seq=0): hdr = MgmtHeader(MgmtOp.WRITE, MgmtGroup.IMAGE, MgmtIdImg.STATE, seq=seq) if CmdBase._debug: print(str(hdr)) print(str(new_state)) return CmdImg(hdr, new_state) @staticmethod def setStateCompleted(rsp, allow_missing_rc=False): cmd = CmdImg.decode_header(rsp) # no complete packet if not cmd.hdr or not cmd.payload_bytes: if CmdBase._debug: print('decode common ret: ', str(cmd)) return cmd if cmd.hdr.op != MgmtOp.WRITE_RSP or cmd.hdr.group != MgmtGroup.IMAGE or cmd.hdr.id != MgmtIdImg.STATE: raise ValueError('Unexpected response: {}'.format(cmd.hdr)) dec_msg = cmd.decode() if CmdBase._debug: print('decoded:', str(dec_msg)) err = MgmtErr.from_response(dec_msg, allow_missing=allow_missing_rc) if not err: img_desc = ImgDescription(dec_msg) if CmdBase._debug: print(img_desc) for idx, sl in enumerate(img_desc.slots): print('image:{} {}'.format(idx, str(sl))) else: raise ValueError('{}: {}'.format(err.value, str(err))) return ResponseBase(err, dec_msg, img_desc) @staticmethod def getStateCompleted(rsp): cmd = CmdImg.decode_header(rsp) # no complete packet if not cmd.hdr or not cmd.payload_bytes: if CmdBase._debug: print('decode common ret: ', str(cmd)) return cmd if cmd.hdr.op != MgmtOp.READ_RSP or cmd.hdr.group != MgmtGroup.IMAGE or cmd.hdr.id != MgmtIdImg.STATE: raise ValueError('Unexpected response: {}'.format(cmd.hdr)) dec_msg = cmd.decode() err = MgmtErr.from_response(dec_msg, allow_missing=True) if not err: 
img_desc = ImgDescription(dec_msg) if CmdBase._debug: print(img_desc) for idx, sl in enumerate(img_desc.slots): print('image:{} {}'.format(idx, str(sl))) else: raise ValueError('{}: {}'.format(err.value, str(err))) return ResponseBase(err, dec_msg, img_desc) @staticmethod def getState(seq=0): hdr = MgmtHeader(MgmtOp.READ, MgmtGroup.IMAGE, MgmtIdImg.STATE, seq=seq) return CmdImg(hdr, {}) @staticmethod def imageConfirm(seq=0): return CmdImg.setState({ 'confirm': True }, seq=seq) @staticmethod def imageUploadStart(img_bytes, offset, max_len, sha, seq=0): hdr = MgmtHeader(MgmtOp.WRITE, MgmtGroup.IMAGE, MgmtIdImg.UPLOAD, seq=seq) # Need to check how much data to append. cmd = CmdImg(hdr, { 'off': offset, 'len': len(img_bytes), 'data': b'', 'sha': sha }) pkt_len = len(cmd.encode()) if pkt_len >= max_len: # can we fragement it ? can we start w/o data attribute? raise ValueError('MTU to short for this packet') d_len = max_len - pkt_len if CmdBase._debug: print('Lengths: max_len:', max_len, 'pkt_len:', pkt_len, 'data_len:', d_len) print('data_len:', len(img_bytes[offset:(offset + d_len)])) print('Adding', d_len, 'bytes of data') # putting 'data' at end will trigger corruption in cbor on the FW. ( on z2.0.0) # ('data' key pointer will LSB will be set to 0, while copying sha value) # new accepted 'data' key will be some arbitrary string at that location return CmdImg(hdr, { 'off': offset, 'data': img_bytes[offset:(offset + d_len)], 'sha': sha, 'len': len(img_bytes), }) @staticmethod def imageUploadContinue(img_bytes, offset, max_len, seq=0, data_len_hint=0): # Done if offset >= len(img_bytes) or offset < 0: return None hdr = MgmtHeader(MgmtOp.WRITE, MgmtGroup.IMAGE, MgmtIdImg.UPLOAD, seq=seq) # TODO: size hint does not work as encoded offset len increases (1byte/2byte/4byte..) # allocate 4 bytes for it, should not need more if not data_len_hint: # Need to check how much data to append. cmd = CmdImg(hdr, { 'off': 65536, 'data': b'' }) pkt_len = len(cmd.encode()) if pkt_len >= max_len: # can we fragement it ? can we start w/o data attribute? 
raise ValueError('MTU to short for this packet') d_len = max_len - pkt_len else: d_len = data_len_hint if CmdBase._debug: print('UL Continue at offset', offset) if d_len <= 0: raise ValueError('Data len calculation failed, got:', d_len) return CmdImg(hdr, { 'off': offset, 'data': img_bytes[offset:(offset + d_len)], }) @staticmethod def imageErase(seq=0): hdr = MgmtHeader(MgmtOp.WRITE, MgmtGroup.IMAGE, MgmtIdImg.ERASE, seq=seq) return CmdImg(hdr, {}) @staticmethod def imageEraseCompleted(rsp): cmd = CmdImg.decode_header(rsp) # no complete packet if not cmd.hdr or not cmd.payload_bytes: if CmdBase._debug: print('decode common ret: ', str(cmd)) return cmd if cmd.hdr.op != MgmtOp.WRITE_RSP or cmd.hdr.group != MgmtGroup.IMAGE or cmd.hdr.id != MgmtIdImg.ERASE: raise ValueError('Unexpected response: {}'.format(cmd.hdr)) dec_msg = cmd.decode() if CmdBase._debug: print('decoded:', str(dec_msg)) err = MgmtErr.from_response(dec_msg) return ResponseBase(err, dec_msg, None) class ImageErase(RequestBase): def __init__(self): super().__init__() def message(self): if self.response_data: return None return CmdImg.imageErase() def parse_response(self, rsp): rsp_cmd = CmdImg.imageEraseCompleted(rsp) self.response_data = rsp_cmd return self.response_data class ImageList(RequestBase): def __init__(self): super().__init__() def message(self): if self.response_data: return None return CmdImg.getState() def parse_response(self, rsp): self.response_data = CmdImg.getStateCompleted(rsp) return self.response_data class ImageTest(RequestBase): def __init__(self, sha): super().__init__() if isinstance(sha, str): if len(sha) != 64: raise ValueError("Wrong hash length: {}".format(len(sha))) sha_b = bytes([ int(sha[idx:idx+2], 16) for idx in range(0, len(sha), 2) ]) elif isinstance(sha, bytes): if len(sha) != 32: raise ValueError("Wrong hash length: {}".format(len(sha))) sha_b = sha assert type(sha_b) == bytes self.sha = sha self._sha_b = sha_b def message(self): if self.response_data: return None return CmdImg.setState({ 'confirm': False, 'hash': self._sha_b }) def parse_response(self, rsp): self.response_data = CmdImg.setStateCompleted(rsp, allow_missing_rc=True) return self.response_data def __str__(self): return '{}(hash={})'.format(self.__class__.__name__, self.sha) class ImageConfirm(RequestBase): def __init__(self): super().__init__() def message(self): if self.response_data: return None return CmdImg.setState({ 'confirm': True }) def parse_response(self, rsp): self.response_data = CmdImg.setStateCompleted(rsp, allow_missing_rc=True) return self.response_data class ImageUpload(RequestBase): def __init__(self, mcuboot_image, mtu=252, progress=False): super().__init__() self.image = mcuboot_image self.sha = mcuboot_image.hash() self.current_offset = 0 self.next_offset = 0 self.retry_offset = 3 self.len = len(mcuboot_image.data) self.image_slot = 0 # Need 12 b for other proto layers? 
self.mtu = mtu - 5 self.data_len_hint = 0 self.seq = 0 self.progress = progress self._progress_last = 0 self._progress_init_offset = 0 self.starttime = None def message(self): # Nothing received => starting message if self.response_data == None: self.starttime = time.time() cmd = CmdImg.imageUploadStart(self.image.data, self.current_offset, self.mtu, self.sha) self.next_offset = self.current_offset + len(cmd.payload_dict['data']) elif self.response_data.err: # last rsp was mgmt err return None elif self.current_offset >= len(self.image.data): # we are done if self.progress: et = time.time() elapsed = (et - self.starttime) speed = (self.len - self._progress_init_offset) / elapsed print('{} / {} (100%)'.format(len(self.image.data), len(self.image.data))) print('{}s ({:3.1f} kb/s)'.format(int(elapsed), speed/1024), flush=True) return None # should not completely restart (in case MTU is to small to include first data) # newer mcumgr version will not even accept it else: cmd = CmdImg.imageUploadContinue(self.image.data, self.current_offset, self.mtu, data_len_hint=self.data_len_hint) self.data_len_hint = len(cmd.payload_dict['data']) self.next_offset = self.current_offset + len(cmd.payload_dict['data']) return cmd def parse_response(self, rsp): hdr = MgmtHeader.decode(rsp) if hdr.op != MgmtOp.WRITE_RSP or hdr.group != MgmtGroup.IMAGE or hdr.id != MgmtIdImg.UPLOAD: raise ValueError('Unexpected response: {}'.format(hdr)) if len(rsp) > hdr.size: dec_msg = cbor.loads(rsp[hdr.size:]) if CmdBase._debug: print(dec_msg) else: # transport should handle this case raise ValueError('Complete header w/o payload: {}'.format(hdr)) err = MgmtErr.from_response(dec_msg) if err: self.response_data = ResponseBase(err, dec_msg, None) return self.response_data if not 'off' in dec_msg: raise ValueError('Missing key \'off\' in response') # account for aligned data requests if dec_msg['off'] != self.next_offset and dec_msg['off'] != (self.next_offset - (self.next_offset % 4)): if dec_msg['off'] < self.next_offset: if self.retry_offset >= 0: print('Missed a packet, resending offset:', dec_msg['off'], 'expected:', self.next_offset, file=sys.stderr) self.retry_offset -= 1 else: raise ValueError("Exeeded retries on resend, off:", dec_msg['off'], 'expected:', self.next_offset) else: print('Upload continue ...') self._progress_init_offset = dec_msg['off'] else: self.retry_offset = 3 self.current_offset = dec_msg['off'] if self.progress: percent = int((self.current_offset / len(self.image.data)) * 100) # prevent exessive printing if percent - self._progress_last >= 1: self._progress_last = percent print('{} / {} ({}%)'.format(self.current_offset, len(self.image.data), percent), flush=True) self.response_data = ResponseBase(err, dec_msg, None) return self.response_data def __str__(self): return '{}(image=MCUBootImage(version={},hash={}))'.format(self.__class__.__name__, self.image.version, self.image.hash_str()) def _image_hash(val): if isinstance(val, str): if len(val) != 64: raise ValueError("Wrong hash string length") return val raise ValueError("Wrong format: hash") def registerImageCommandArguments(sub_parsers): img_cmd_parser = sub_parsers.add_parser('image', help='Manage images on a device') img_subs = img_cmd_parser.add_subparsers(title='Available Commands', dest='img_cmd') img_subs.add_parser('list', help='Show images on a device') img_subs.add_parser('confirm', help='Confirm active image') testparser = img_subs.add_parser('test', help='Test an image on next reboot') # testparser.add_argument('hash', type=str, 
required=True) testparser.add_argument('hash', type=_image_hash, default=None) uploadparser = img_subs.add_parser('upload', help='Upload image to a device') # uploadparser.add_argument('hash', type=str, required=True) uploadparser.add_argument('file', default=None) img_subs.add_parser('erase', help='Erase unused image on a device') return img_cmd_parser
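# --- Editor-added usage sketch ---
# Shows how the request objects above can be built directly; the 64-character hash below
# is a placeholder, and actually encoding/sending the commands over a transport (which
# depends on CmdBase/MgmtHeader internals not shown here) is out of scope.
def _example_requests():
    list_req = ImageList()
    list_cmd = list_req.message()      # CmdImg read of the IMAGE/STATE group
    test_req = ImageTest("ab" * 32)    # hex digest string, converted to 32 raw bytes
    test_cmd = test_req.message()      # CmdImg carrying {'confirm': False, 'hash': ...}
    return list_cmd, test_cmd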
the-stack_106_30762
# Copyright 2011 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from troveclient import base from troveclient import common from troveclient import exceptions from troveclient import utils from troveclient.v1 import modules as core_modules from swiftclient import client as swift_client REBOOT_SOFT = 'SOFT' REBOOT_HARD = 'HARD' class Instance(base.Resource): """An Instance is an opaque instance used to store Database instances.""" def list_databases(self): return self.manager.databases.list(self) def delete(self): """Delete the instance.""" self.manager.delete(self) def force_delete(self): """Force delete the instance""" self.manager.reset_status(self) self.manager.delete(self) def restart(self): """Restart the database instance.""" self.manager.restart(self.id) def detach_replica(self): """Stops the replica database from being replicated to.""" self.manager.edit(self.id, detach_replica_source=True) class DatastoreLog(base.Resource): """A DatastoreLog is a log on the database guest instance.""" def __repr__(self): return "<DatastoreLog: %s>" % self.name class Instances(base.ManagerWithFind): """Manage :class:`Instance` resources.""" resource_class = Instance def _get_swift_client(self): if hasattr(self.api.client, 'auth'): auth_url = self.api.client.auth.auth_url user = self.api.client.auth._username key = self.api.client.auth._password tenant_name = self.api.client.auth._project_name auth_version = "3.0" else: auth_url = self.api.client.auth_url user = self.api.client.username key = self.api.client.password tenant_name = self.api.client.tenant auth_version = "2.0" # remove '/tokens' from the end of auth_url so it works for swift token_str = "/tokens" if auth_url.endswith(token_str): auth_url = auth_url[:-len(token_str)] region_name = self.api.client.region_name os_options = {'tenant_name': tenant_name, 'region_name': region_name} return swift_client.Connection( auth_url, user, key, auth_version=auth_version, os_options=os_options) def create(self, name, flavor_id, volume=None, databases=None, users=None, restorePoint=None, availability_zone=None, datastore=None, datastore_version=None, nics=None, configuration=None, replica_of=None, replica_count=None, modules=None, locality=None, region_name=None): """Create (boot) a new instance.""" body = {"instance": { "name": name, "flavorRef": flavor_id }} datastore_obj = {} if volume: body["instance"]["volume"] = volume if databases: body["instance"]["databases"] = databases if users: body["instance"]["users"] = users if restorePoint: body["instance"]["restorePoint"] = restorePoint if availability_zone: body["instance"]["availability_zone"] = availability_zone if datastore: datastore_obj["type"] = datastore if datastore_version: datastore_obj["version"] = datastore_version if datastore_obj: body["instance"]["datastore"] = datastore_obj if nics: body["instance"]["nics"] = nics if configuration: 
body["instance"]["configuration"] = base.getid(configuration) if replica_of: body["instance"]["replica_of"] = base.getid(replica_of) if replica_count: body["instance"]["replica_count"] = replica_count if modules: body["instance"]["modules"] = self._get_module_list(modules) if locality: body["instance"]["locality"] = locality if region_name: body["instance"]["region_name"] = region_name return self._create("/instances", body, "instance") def modify(self, instance, configuration=None): body = { "instance": { } } if configuration is not None: body["instance"]["configuration"] = base.getid(configuration) url = "/instances/%s" % base.getid(instance) resp, body = self.api.client.put(url, body=body) common.check_for_exceptions(resp, body, url) def edit(self, instance, configuration=None, name=None, detach_replica_source=False, remove_configuration=False): body = { "instance": { } } if configuration and remove_configuration: raise Exception("Cannot attach and detach configuration " "simultaneously.") if remove_configuration: body["instance"]["configuration"] = None if configuration is not None: body["instance"]["configuration"] = base.getid(configuration) if name is not None: body["instance"]["name"] = name if detach_replica_source: body["instance"]["replica_of"] = None url = "/instances/%s" % base.getid(instance) resp, body = self.api.client.patch(url, body=body) common.check_for_exceptions(resp, body, url) def upgrade(self, instance, datastore_version): """Upgrades an instance with a new datastore version.""" body = { "instance": { "datastore_version": datastore_version } } url = "/instances/%s" % base.getid(instance) resp, body = self.api.client.patch(url, body=body) common.check_for_exceptions(resp, body, url) def list(self, limit=None, marker=None, include_clustered=False): """Get a list of all instances. :rtype: list of :class:`Instance`. """ return self._paginated("/instances", "instances", limit, marker, {"include_clustered": include_clustered}) def get(self, instance): """Get a specific instances. :rtype: :class:`Instance` """ return self._get("/instances/%s" % base.getid(instance), "instance") def backups(self, instance, limit=None, marker=None): """Get the list of backups for a specific instance. :param instance: instance for which to list backups :param limit: max items to return :param marker: marker start point :rtype: list of :class:`Backups`. """ url = "/instances/%s/backups" % base.getid(instance) return self._paginated(url, "backups", limit, marker) def delete(self, instance): """Delete the specified instance. :param instance: A reference to the instance to delete """ url = "/instances/%s" % base.getid(instance) resp, body = self.api.client.delete(url) common.check_for_exceptions(resp, body, url) def reset_status(self, instance): """Reset the status of an instance. :param instance: A reference to the instance """ body = {'reset_status': {}} self._action(instance, body) def force_delete(self, instance): """Force delete the specified instance. 
:param instance: A reference to the instance to force delete """ self.reset_status(instance) self.delete(instance) def _action(self, instance, body): """Perform a server "action" -- reboot/rebuild/resize/etc.""" url = "/instances/%s/action" % base.getid(instance) resp, body = self.api.client.post(url, body=body) common.check_for_exceptions(resp, body, url) if body: return self.resource_class(self, body, loaded=True) return body def resize_volume(self, instance, volume_size): """Resize the volume on an existing instances.""" body = {"resize": {"volume": {"size": volume_size}}} self._action(instance, body) def resize_instance(self, instance, flavor_id): """Resizes an instance with a new flavor.""" body = {"resize": {"flavorRef": flavor_id}} self._action(instance, body) def restart(self, instance): """Restart the database instance. :param instance: The :class:`Instance` (or its ID) of the database instance to restart. """ body = {'restart': {}} self._action(instance, body) def configuration(self, instance): """Get a configuration on instances. :rtype: :class:`Instance` """ return self._get("/instances/%s/configuration" % base.getid(instance), "instance") def promote_to_replica_source(self, instance): """Promote a replica to be the new replica_source of its set :param instance: The :class:`Instance` (or its ID) of the database instance to promote. """ body = {'promote_to_replica_source': {}} self._action(instance, body) def eject_replica_source(self, instance): """Eject a replica source from its set :param instance: The :class:`Instance` (or its ID) of the database instance to eject. """ body = {'eject_replica_source': {}} self._action(instance, body) def modules(self, instance): """Get the list of modules for a specific instance.""" return self._modules_get(instance) def module_query(self, instance): """Query an instance about installed modules.""" return self._modules_get(instance, from_guest=True) def module_retrieve(self, instance, directory=None, prefix=None): """Retrieve the module data file from an instance. This includes the contents of the module data file. """ if directory: try: os.makedirs(directory, exist_ok=True) except TypeError: # py27 try: os.makedirs(directory) except OSError: if not os.path.isdir(directory): raise else: directory = '.' 
prefix = prefix or '' if prefix and not prefix.endswith('_'): prefix += '_' module_list = self._modules_get( instance, from_guest=True, include_contents=True) saved_modules = {} for module in module_list: filename = '%s%s_%s_%s.dat' % (prefix, module.name, module.datastore, module.datastore_version) full_filename = os.path.expanduser( os.path.join(directory, filename)) with open(full_filename, 'wb') as fh: fh.write(utils.decode_data(module.contents)) saved_modules[module.name] = full_filename return saved_modules def _modules_get(self, instance, from_guest=None, include_contents=None): url = "/instances/%s/modules" % base.getid(instance) query_strings = {} if from_guest is not None: query_strings["from_guest"] = from_guest if include_contents is not None: query_strings["include_contents"] = include_contents url = common.append_query_strings(url, **query_strings) resp, body = self.api.client.get(url) common.check_for_exceptions(resp, body, url) return [core_modules.Module(self, module, loaded=True) for module in body['modules']] def module_apply(self, instance, modules): """Apply modules to an instance.""" url = "/instances/%s/modules" % base.getid(instance) body = {"modules": self._get_module_list(modules)} resp, body = self.api.client.post(url, body=body) common.check_for_exceptions(resp, body, url) return [core_modules.Module(self, module, loaded=True) for module in body['modules']] def _get_module_list(self, modules): """Build a list of module ids.""" module_list = [] for module in modules: module_info = {'id': base.getid(module)} module_list.append(module_info) return module_list def module_remove(self, instance, module): """Remove a module from an instance. """ url = "/instances/%s/modules/%s" % (base.getid(instance), base.getid(module)) resp, body = self.api.client.delete(url) common.check_for_exceptions(resp, body, url) def log_list(self, instance): """Get a list of all guest logs. :param instance: The :class:`Instance` (or its ID) of the database instance to get the log for. :rtype: list of :class:`DatastoreLog`. """ url = '/instances/%s/log' % base.getid(instance) resp, body = self.api.client.get(url) common.check_for_exceptions(resp, body, url) return [DatastoreLog(self, log, loaded=True) for log in body['logs']] def log_show(self, instance, log_name): return self._log_action(instance, log_name) def log_enable(self, instance, log_name): return self._log_action(instance, log_name, enable=True) def log_disable(self, instance, log_name, discard=None): return self._log_action(instance, log_name, disable=True, discard=discard) def log_publish(self, instance, log_name, disable=None, discard=None): return self._log_action(instance, log_name, disable=disable, publish=True, discard=discard) def log_discard(self, instance, log_name): return self._log_action(instance, log_name, discard=True) def _log_action(self, instance, log_name, enable=None, disable=None, publish=None, discard=None): """Perform action on guest log. :param instance: The :class:`Instance` (or its ID) of the database instance to get the log for. :param log_name: The name of <log> to publish :param enable: Turn on <log> :param disable: Turn off <log> :param publish: Publish log to associated container :param discard: Delete the associated container :rtype: List of :class:`DatastoreLog`. 
""" body = {"name": log_name} if enable: body.update({'enable': int(enable)}) if disable: body.update({'disable': int(disable)}) if publish: body.update({'publish': int(publish)}) if discard: body.update({'discard': int(discard)}) url = "/instances/%s/log" % base.getid(instance) resp, body = self.api.client.post(url, body=body) common.check_for_exceptions(resp, body, url) return DatastoreLog(self, body['log'], loaded=True) def _get_container_info(self, instance, log_name, publish): try: log_info = self._log_action(instance, log_name, publish=publish) container = log_info.container prefix = log_info.prefix metadata_file = log_info.metafile return container, prefix, metadata_file except swift_client.ClientException as ex: if ex.http_status == 404: raise exceptions.GuestLogNotFoundError() raise def log_generator(self, instance, log_name, publish=None, lines=50, swift=None): """Return generator to yield the last <lines> lines of guest log. :param instance: The :class:`Instance` (or its ID) of the database instance to get the log for. :param log_name: The name of <log> to publish :param publish: Publish updates before displaying log :param lines: Display last <lines> lines of log (0 for all lines) :param swift: Connection to swift :rtype: generator function to yield log as chunks. """ if not swift: swift = self._get_swift_client() def _log_generator(instance, log_name, publish, lines, swift): try: container, prefix, metadata_file = self._get_container_info( instance, log_name, publish) head, body = swift.get_container(container, prefix=prefix) log_obj_to_display = [] if lines: total_lines = lines partial_results = False parts = sorted(body, key=lambda obj: obj['last_modified'], reverse=True) for part in parts: obj_hdrs = swift.head_object(container, part['name']) obj_lines = int(obj_hdrs['x-object-meta-lines']) log_obj_to_display.insert(0, part) if obj_lines >= lines: partial_results = True break lines -= obj_lines if not partial_results: lines = total_lines part = log_obj_to_display.pop(0) hdrs, log_obj = swift.get_object(container, part['name']) log_by_lines = log_obj.splitlines() yield "\n".join(log_by_lines[-1 * lines:]) + "\n" else: log_obj_to_display = sorted( body, key=lambda obj: obj['last_modified']) for log_part in log_obj_to_display: headers, log_obj = swift.get_object(container, log_part['name']) yield log_obj except swift_client.ClientException as ex: if ex.http_status == 404: raise exceptions.GuestLogNotFoundError() raise return lambda: _log_generator(instance, log_name, publish, lines, swift) def log_save(self, instance, log_name, publish=None, filename=None): """Saves a guest log to a file. :param instance: The :class:`Instance` (or its ID) of the database instance to get the log for. :param log_name: The name of <log> to publish :param publish: Publish updates before displaying log :rtype: Filename to which log was saved """ written_file = filename or (instance.name + '-' + log_name + ".log") log_gen = self.log_generator(instance, log_name, publish, 0) with open(written_file, 'w') as f: for log_obj in log_gen(): f.write(log_obj) return written_file class InstanceStatus(object): ACTIVE = "ACTIVE" BLOCKED = "BLOCKED" BUILD = "BUILD" FAILED = "FAILED" REBOOT = "REBOOT" RESIZE = "RESIZE" SHUTDOWN = "SHUTDOWN" RESTART_REQUIRED = "RESTART_REQUIRED" PROMOTING = "PROMOTING" EJECTING = "EJECTING" LOGGING = "LOGGING"
the-stack_106_30766
# Contribution from @fredguth, https://github.com/fredguth/fastai_playground. from fastai.torch_core import * from fastai.callback import * from fastai.basic_train import * __all__ = ['TerminateOnNaNCallback', 'EarlyStoppingCallback', 'SaveModelCallback', 'TrackerCallback', 'ReduceLROnPlateauCallback' ] class TerminateOnNaNCallback(Callback): "A `Callback` that terminates training if loss is NaN." def __init__(self): self.stop = False def on_batch_end(self, last_loss, epoch, num_batch, **kwargs:Any)->None: "Test if `last_loss` is NaN and interrupts training." if self.stop: return True #to skip validation after stopping during traning if torch.isnan(last_loss): print (f'Epoch/Batch ({epoch}/{num_batch}): Invalid loss, terminating training.') self.stop = True return True def on_epoch_end(self, **kwargs:Any)->None: return self.stop @dataclass class TrackerCallback(LearnerCallback): "A `LearnerCallback` that keeps track of the best value in `monitor`." monitor:str='val_loss' mode:str='auto' def __post_init__(self): if self.mode not in ['auto', 'min', 'max']: warn(f'{self.__class__} mode {self.mode} is invalid, falling back to "auto" mode.') self.mode = 'auto' mode_dict = {'min': np.less, 'max':np.greater} mode_dict['auto'] = np.less if 'loss' in self.monitor else np.greater self.operator = mode_dict[self.mode] def on_train_begin(self, **kwargs:Any)->None: "Initializes the best value." self.best = float('inf') if self.operator == np.less else -float('inf') def get_monitor_value(self): "Pick the monitored value." if self.monitor=='trn_loss' and len(self.learn.recorder.losses) == 0: return None elif len(self.learn.recorder.val_losses) == 0: return None values = {'trn_loss':self.learn.recorder.losses[-1:][0].cpu().numpy(), 'val_loss':self.learn.recorder.val_losses[-1:][0]} for i, name in enumerate(self.learn.recorder.names[3:]): values[name]=self.learn.recorder.metrics[-1:][0][i] if values.get(self.monitor) is None: warn(f'{self.__class__} conditioned on metric `{self.monitor}` which is not available. Available metrics are: {", ".join(map(str, self.learn.recorder.names[1:]))}') return values.get(self.monitor) @dataclass class EarlyStoppingCallback(TrackerCallback): "A `TrackerCallback` that terminates training when monitored quantity stops improving." min_delta:int=0 patience:int=0 def __post_init__(self): super().__post_init__() if self.operator == np.less: self.min_delta *= -1 def on_train_begin(self, **kwargs:Any)->None: "Initialize inner arguments." self.wait = 0 super().on_train_begin(**kwargs) def on_epoch_end(self, epoch, **kwargs:Any)->None: "Compare the value monitored to its best score and maybe stop training." current = self.get_monitor_value() if current is None: return if self.operator(current - self.min_delta, self.best): self.best,self.wait = current,0 else: self.wait += 1 if self.wait > self.patience: print(f'Epoch {epoch}: early stopping') return True @dataclass class SaveModelCallback(TrackerCallback): "A `TrackerCallback` that saves the model when monitored quantity is best." every:str='improvement' name:str='bestmodel' def __post_init__(self): if self.every not in ['improvement', 'epoch']: warn(f'SaveModel every {self.every} is invalid, falling back to "improvement".') self.every = 'improvement' super().__post_init__() def on_epoch_end(self, epoch, **kwargs:Any)->None: "Compare the value monitored to its best score and maybe save the model." 
if self.every=="epoch": self.learn.save(f'{self.name}_{epoch}') else: #every="improvement" current = self.get_monitor_value() if current is not None and self.operator(current, self.best): self.best = current self.learn.save(f'{self.name}') def on_train_end(self, **kwargs): "Load the best model." if self.every=="improvement" and (self.learn.path/f'{self.learn.model_dir}/{self.name}.pth').is_file(): self.learn.load(f'{self.name}') @dataclass class ReduceLROnPlateauCallback(TrackerCallback): "A `TrackerCallback` that reduces learning rate when a metric has stopped improving." patience:int=0 factor:float=0.2 min_delta:int=0 def __post_init__(self): super().__post_init__() if self.operator == np.less: self.min_delta *= -1 def on_train_begin(self, **kwargs:Any)->None: "Initialize inner arguments." self.wait, self.opt = 0, self.learn.opt super().on_train_begin(**kwargs) def on_epoch_end(self, epoch, **kwargs:Any)->None: "Compare the value monitored to its best and maybe reduce lr." current = self.get_monitor_value() if current is None: return if self.operator(current - self.min_delta, self.best): self.best,self.wait = current,0 else: self.wait += 1 if self.wait > self.patience: self.opt.lr *= self.factor self.wait = 0 print(f'Epoch {epoch}: reducing lr to {self.opt.lr}')
the-stack_106_30767
import os import sys from setuptools import setup, find_packages os.environ["DJANGO_SETTINGS_MODULE"] = "test_project.settings" # Add test_plus to Python path BASE_DIR = os.path.dirname(__file__) sys.path.insert(0, os.path.join(BASE_DIR, "test_project")) f = open(os.path.join(BASE_DIR, "README.md")) readme = f.read() f.close() setup( name="django-test-plus", version="1.3.1", description="django-test-plus provides useful additions to Django's default TestCase", long_description=readme, long_description_content_type="text/markdown", author="Frank Wiles", author_email="[email protected]", url="https://github.com/revsys/django-test-plus/", include_package_data=True, packages=find_packages(), zip_safe=False, entry_points={"pytest11": ["test_plus = test_plus.plugin"]}, setup_requires=["pytest-runner", "pytest-django"], tests_require=[ "pytest<5.0", "pytest-django==3.5.1", "pytest-cov==2.7.1", "pytest-pythonpath==0.7.3", "factory-boy>=2.11.1", ], classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Framework :: Django", "Framework :: Django :: 1.11", "Framework :: Django :: 2.0", "Framework :: Django :: 2.1", "Framework :: Django :: 2.2", "Framework :: Pytest", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", ], )
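# A small illustrative test, assuming the pytest plugin registered above
# ("test_plus = test_plus.plugin") exposes the `tp` fixture as documented for
# django-test-plus; the URL name 'home' is a placeholder from a hypothetical
# project under test.
def test_home_page_renders(tp):
    response = tp.get('home')   # reverse('home') and issue a GET request
    tp.response_200(response)   # assert the status code is 200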
the-stack_106_30768
from os import path, mkdir from time import sleep from colorama import init from src.generator import main from src.interface.menu import color_text, main_menu from requests import get from re import findall __version__ = "1.0.3" def choose(valid): while True: try: entry = str(input(valid)).strip().lower()[0] except IndexError: pass else: if entry == 'y': return True elif entry == 'n': return False else: print(color_text('yellow', 'Choose between the two options!')) pass def update(): try: r = get("https://raw.githubusercontent.com/Godofcoffe/CWL/main/main.py") remote_version = str(findall('__version__ = "(.*)"', r.text)[0]) local_version = __version__ if remote_version != local_version: print(color_text('yellow', "Update Available!\n" + f"You are running version {local_version}. Version {remote_version} " f"is available at https://github.com/Godofcoffe/CWL")) except Exception as error: print(color_text('red', f"A problem occurred while checking for an update: {error}")) # output .txt files OUT_TXT = 'characters.txt' OUT_NUM = 'numbers.txt' OUT_WIFI = 'WiFi.txt' OUT_ROT = 'Router.txt' folder = 'well/' init() if not path.exists(folder): mkdir(folder) while True: print(color_text("red", r""" _________ __ __.____ \_ ___ \/ \ / \ | / \ \/\ \/\/ / | \ \____\ /| |___ \______ / \__/\ / |_______ \ \/ \/ \/ """)) update() opc = main_menu(['Standard', 'Wifi']) # The standard option can be used for brute force on accounts that use password-generator apps # The wifi option applies to low-security passwords, # such as phone numbers or people's names with dates or random numbers, # or when you know at least part of the password if opc == 1: while True: symbols = choose(color_text("white", 'Do you want to add symbols? [y/n]: ')) cap_letters = choose(color_text("white", 'Do you want to add capital letters? [y/n]: ')) numbers = choose(color_text("white", 'Want it to contain numbers? [y/n]: ')) print() print(color_text("yellow", "Procedures:")) print(f"{color_text('green', 'symbols')}: {symbols}") print(f"{color_text('green', 'capital letters')}: {cap_letters}") print(f"{color_text('green', 'numbers')}: {numbers}") print() if choose('Continue...? [y/n]: '): if symbols and cap_letters and numbers: main(folder + OUT_TXT, uppers=True, numbers=True, symbols=True) elif not symbols and not cap_letters and not numbers: main(folder + OUT_TXT) elif not symbols and cap_letters and not numbers: main(folder + OUT_TXT, uppers=True) elif symbols and not cap_letters and not numbers: main(folder + OUT_TXT, symbols=True) elif not symbols and not cap_letters and numbers: main(folder + OUT_TXT, numbers=True) elif symbols and cap_letters and not numbers: main(folder + OUT_TXT, symbols=True, uppers=True) elif symbols and not cap_letters and numbers: main(folder + OUT_TXT, symbols=True, numbers=True) break else: break elif opc == 2: opc2 = main_menu(['Numbers', 'Keyword', 'Default password']) if opc2 == 1: main(folder + OUT_NUM, only_num=True) elif opc2 == 2: selection = choose('Do you want the word to be at the beginning? [y/n]: ') name = str(input('What is the word: ')) print('Spaces will be filled with random characters...') sleep(3) spaces_num = choose('Do you want spaces to be numbers? [y/n]: ') print() print(color_text("yellow", "Procedures:")) print(f"{color_text('green', 'Word at the beginning')}: {selection}") print(f"{color_text('green', 'Word')}: {name}") print(f"{color_text('green', 'numbers')}: {spaces_num}") print() if choose('Continue...? 
[y/n]: '): if selection and spaces_num: main(folder + OUT_WIFI, word=name, position=selection, numbers=spaces_num) elif selection and not spaces_num: main(folder + OUT_WIFI, word=name, position=selection) elif not selection and spaces_num: main(folder + OUT_WIFI, word=name, numbers=spaces_num) elif opc2 == 3: # The difference here is that instead of 8 characters there will be 10. # normally used for routers with factory-default SSID passwords main(folder + OUT_ROT, limit=10) elif opc2 == 4: print(color_text('white', 'exiting...')) sleep(1) break elif opc == 3: print(color_text('white', 'exiting...')) sleep(1) break
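# The menu above drives src.generator.main purely through keyword arguments
# (uppers, numbers, symbols, only_num, word, position, limit).  That module is
# not shown here, so the stub below is only a guess at a compatible signature:
# it builds a character pool from the flags and writes every combination of the
# remaining "free" positions to the output file.  Note the candidate space grows
# as len(pool) ** free, so this is illustrative rather than practical.
import itertools
import string

def generator_main_sketch(out, limit=8, only_num=False, uppers=False,
                          numbers=False, symbols=False, word=None, position=False):
    # build the character pool from the selected options
    pool = string.digits if only_num else string.ascii_lowercase
    if not only_num:
        if uppers:
            pool += string.ascii_uppercase
        if numbers:
            pool += string.digits
        if symbols:
            pool += "!@#$%&*"
    # positions not occupied by the fixed keyword
    free = limit - len(word) if word else limit
    with open(out, 'w') as fh:
        for combo in itertools.product(pool, repeat=free):
            filler = ''.join(combo)
            if word:
                candidate = word + filler if position else filler + word
            else:
                candidate = filler
            fh.write(candidate + '\n')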
the-stack_106_30772
# -*- coding: utf-8 -*- # Copyright 2019-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Jaroslav Guenther <[email protected]>, 2019-2020 # - Patrick Austin <[email protected]>, 2020 # - Benedikt Ziemons <[email protected]>, 2021 import json import random import subprocess import time import traceback from datetime import datetime, timedelta from math import floor from urllib.parse import urlparse, parse_qs from jwkest.jws import JWS from jwkest.jwt import JWT from oic import rndstr from oic.oauth2.message import CCAccessTokenRequest from oic.oic import Client, Grant, Token, REQUEST2ENDPOINT from oic.oic.message import (AccessTokenResponse, AuthorizationResponse, Message, RegistrationResponse) from oic.utils import time_util from oic.utils.authn.client import CLIENT_AUTHN_METHOD from sqlalchemy import and_ from sqlalchemy.sql.expression import true from rucio.common.config import config_get, config_get_int from rucio.common.exception import (CannotAuthenticate, CannotAuthorize, RucioException) from rucio.common.utils import all_oidc_req_claims_present, build_url, val_to_space_sep_str from rucio.core.account import account_exists from rucio.core.identity import exist_identity_account, get_default_account from rucio.core.monitor import record_counter, record_timer from rucio.db.sqla import filter_thread_work from rucio.db.sqla import models from rucio.db.sqla.constants import IdentityType from rucio.db.sqla.session import read_session, transactional_session # worokaround for a bug in pyoidc (as of Dec 2019) REQUEST2ENDPOINT['CCAccessTokenRequest'] = 'token_endpoint' # private/protected file containing Rucio Client secrets known to the Identity Provider as well IDPSECRETS = config_get('oidc', 'idpsecrets', False) ADMIN_ISSUER_ID = config_get('oidc', 'admin_issuer', False) EXPECTED_OIDC_AUDIENCE = config_get('oidc', 'expected_audience', False, 'rucio') EXPECTED_OIDC_SCOPE = config_get('oidc', 'expected_scope', False, 'openid profile') EXCHANGE_GRANT_TYPE = config_get('oidc', 'exchange_grant_type', False, 'urn:ietf:params:oauth:grant-type:token-exchange') REFRESH_LIFETIME_H = config_get_int('oidc', 'default_jwt_refresh_lifetime', False, 96) # TO-DO permission layer: if scope == 'wlcg.groups' # --> check 'profile' info (requested profile scope) def __get_rucio_oidc_clients(keytimeout=43200): """ Creates a Rucio OIDC Client instances per Identity Provider (IdP) according to etc/idpsecrets.json configuration file. Clients have to be pre-registered with the respective IdP with the appropriate settings: allowed to request refresh tokens which have lifetime set in their unverified header, allowed to request token exchange, immediate refresh tokens expiration after first use) :returns: Dictionary of {'https://issuer_1/': <Rucio OIDC Client_1 instance>, 'https://issuer_2/': <Rucio OIDC Client_2 instance>,}. In case of trouble, Exception is raised. 
""" clients = {} admin_clients = {} try: with open(IDPSECRETS) as client_secret_file: client_secrets = json.load(client_secret_file) except: return (clients, admin_clients) for iss in client_secrets: try: client_secret = client_secrets[iss] issuer = client_secret["issuer"] client = Client(client_authn_method=CLIENT_AUTHN_METHOD) # general parameter discovery about the Identity Provider via issuers URL client.provider_config(issuer) # storing client specific parameters into the client itself client_reg = RegistrationResponse(**client_secret) client.store_registration_info(client_reg) # setting public_key cache timeout to 'keytimeout' seconds keybundles = client.keyjar.issuer_keys[client.issuer] for keybundle in keybundles: keybundle.cache_time = keytimeout clients[issuer] = client # doing the same to store a Rucio Admin client # which has client credential flow allowed client_secret = client_secrets[iss]["SCIM"] client = Client(client_authn_method=CLIENT_AUTHN_METHOD) client.provider_config(issuer) client_reg = RegistrationResponse(**client_secret) client.store_registration_info(client_reg) admin_clients[issuer] = client except Exception as error: raise RucioException(error.args) return (clients, admin_clients) # Initialising Rucio OIDC Clients ALL_OIDC_CLIENTS = __get_rucio_oidc_clients() OIDC_CLIENTS = ALL_OIDC_CLIENTS[0] OIDC_ADMIN_CLIENTS = ALL_OIDC_CLIENTS[1] def __get_init_oidc_client(token_object=None, token_type=None, **kwargs): """ Get an OIDC client object, (re-)initialised with parameters corresponding to authorization flows used to get a token. For special cases - token refresh, token exchange - these parameters are being mocked as pyoidc library has to develop these areas. Initialisation can be made either by kwargs (for a authorization code flow e.g.) or via kwargs (for token exchange or token refresh). :param session_state: state value of the first authorization request :param token_object: DB token token to be included in a Grant for the token exchange or token refresh mechanisms :param token_type: e.g. "subject_token" for token exchange or "refresh_token" :param kwargs: optional strings which contain expected oauth session parameters: issuer_id/issuer, redirect_uri, redirect_to, state, nonce, code, scope, audience, :returns: if first_init == True: dict {'client': oidc client object, 'request': auth_url} for all other cases return oidc client object. If anything goes wrong, exception is thrown. 
""" try: auth_args = {"grant_types": ["authorization_code"], "response_type": "code", "state": kwargs.get('state', rndstr()), "nonce": kwargs.get('nonce', rndstr())} auth_args["scope"] = token_object.oidc_scope if token_object else kwargs.get('scope', " ") auth_args["audience"] = token_object.audience if token_object else kwargs.get('audience', " ") if token_object: issuer = token_object.identity.split(", ")[1].split("=")[1] oidc_client = OIDC_CLIENTS[issuer] auth_args["client_id"] = oidc_client.client_id token = '' if not token_type: token_type = kwargs.get('token_type', None) if token_type == 'subject_token': token = token_object.token if token_type == 'refresh_token': token = token_object.refresh_token if token_type and token: oidc_client.grant[auth_args['state']] = Grant() oidc_client.grant[auth_args['state']].grant_expiration_time = time_util.utc_time_sans_frac() + 300 resp = AccessTokenResponse() resp[token_type] = token oidc_client.grant[auth_args['state']].tokens.append(Token(resp)) else: secrets, client_secret = {}, {} try: with open(IDPSECRETS) as client_secret_file: secrets = json.load(client_secret_file) except: raise CannotAuthenticate("Rucio server is missing information from the idpsecrets.json file.") if 'issuer_id' in kwargs: client_secret = secrets[kwargs.get('issuer_id', ADMIN_ISSUER_ID)] elif 'issuer' in kwargs: client_secret = next((secrets[i] for i in secrets if 'issuer' in secrets[i] and # NOQA: W504 secrets[i]['issuer'] == kwargs.get('issuer')), None) redirect_url = kwargs.get('redirect_uri', None) if not redirect_url: redirect_to = kwargs.get("redirect_to", "auth/oidc_token") redirect_urls = [u for u in client_secret["redirect_uris"] if redirect_to in u] redirect_url = random.choice(redirect_urls) if not redirect_url: raise CannotAuthenticate("Could not pick any redirect URL(s) from the ones defined " + "in Rucio OIDC Client configuration file.") # NOQA: W503 auth_args["redirect_uri"] = redirect_url oidc_client = OIDC_CLIENTS[client_secret["issuer"]] auth_args["client_id"] = oidc_client.client_id if kwargs.get('first_init', False): auth_url = build_url(oidc_client.authorization_endpoint, params=auth_args) return {'redirect': redirect_url, 'auth_url': auth_url} oidc_client.construct_AuthorizationRequest(request_args=auth_args) # parsing the authorization query string by the Rucio OIDC Client (creates a Grant) oidc_client.parse_response(AuthorizationResponse, info='code=' + kwargs.get('code', rndstr()) + '&state=' + auth_args['state'], sformat="urlencoded") return {'client': oidc_client, 'state': auth_args['state']} except Exception: raise CannotAuthenticate(traceback.format_exc()) @transactional_session def get_auth_oidc(account, session=None, **kwargs): """ Assembles the authorization request of the Rucio Client tailored to the Rucio user & Identity Provider. Saves authentication session parameters in the oauth_requests DB table (for later use-cases). This information is saved for the token lifetime of a token to allow token exchange and refresh. Returns authorization URL as a string or a redirection url to be used in user's browser for authentication. :param account: Rucio Account identifier as a string. :param auth_scope: space separated list of scope names. Scope parameter defines which user's info the user allows to provide to the Rucio Client. 
:param audience: audience for which tokens are requested (EXPECTED_OIDC_AUDIENCE is the default) :param auto: If True, the function will return authorization URL to the Rucio Client which will log-in with user's IdP credentials automatically. Also it will instruct the IdP to return an AuthZ code to another Rucio REST endpoint /oidc_token. If False, the function will return a URL to be used by the user in the browser in order to authenticate via IdP (which will then return with AuthZ code to /oidc_code REST endpoint). :param polling: If True, '_polling' string will be appended to the access_msg in the DB oauth_requests table to inform the authorization stage that the Rucio Client is polling the server for a token (and no fetchcode needs to be returned at the end). :param refresh_lifetime: specifies how long the OAuth daemon should be refreshing this token. Default is 96 hours. :param ip: IP address of the client as a string. :param session: The database session in use. :returns: User & Rucio OIDC Client specific Authorization or Redirection URL as a string OR a redirection url to be used in user's browser for authentication. """ # TO-DO - implement a check if that account already has a valid # token withthe required scope and audience and return such token ! auth_scope = kwargs.get('auth_scope', EXPECTED_OIDC_SCOPE) if not auth_scope: auth_scope = EXPECTED_OIDC_SCOPE audience = kwargs.get('audience', EXPECTED_OIDC_AUDIENCE) if not audience: audience = EXPECTED_OIDC_AUDIENCE # checking that minimal audience and scope requirements (required by Rucio) are satisfied ! if not all_oidc_req_claims_present(auth_scope, audience, EXPECTED_OIDC_SCOPE, EXPECTED_OIDC_AUDIENCE): raise CannotAuthenticate("Requirements of scope and audience do not satisfy minimal requirements of the Rucio server.") issuer_id = kwargs.get('issuer', ADMIN_ISSUER_ID) if not issuer_id: issuer_id = ADMIN_ISSUER_ID auto = kwargs.get('auto', False) polling = kwargs.get('polling', False) refresh_lifetime = kwargs.get('refresh_lifetime', REFRESH_LIFETIME_H) ip = kwargs.get('ip', None) webhome = kwargs.get('webhome', None) # For webui a mock account will be used here and default account # will be assigned to the identity during get_token_oidc if account.external == 'webui': pass else: # Make sure the account exists if not account_exists(account, session=session): return None try: start = time.time() # redirect_url needs to be specified & one of those defined # in the Rucio OIDC Client configuration redirect_to = "auth/oidc_code" if auto: redirect_to = "auth/oidc_token" # random strings in order to keep track of responses to outstanding requests (state) # and to associate a client session with an ID Token and to mitigate replay attacks (nonce). state, nonce = rndstr(50), rndstr(50) # in the following statement we retrieve the authorization endpoint # from the client of the issuer and build url oidc_dict = __get_init_oidc_client(issuer_id=issuer_id, redirect_to=redirect_to, state=state, nonce=nonce, scope=auth_scope, audience=audience, first_init=True) auth_url = oidc_dict['auth_url'] redirect_url = oidc_dict['redirect'] # redirect code is put in access_msg and returned to the user (if auto=False) access_msg = None if not auto: access_msg = rndstr(23) if polling: access_msg += '_polling' if auto and webhome: access_msg = str(webhome) # Making sure refresh_lifetime is an integer or None. if refresh_lifetime: refresh_lifetime = int(refresh_lifetime) # Specifying temporarily 5 min lifetime for the authentication session. 
expired_at = datetime.utcnow() + timedelta(seconds=300) # saving session parameters into the Rucio DB oauth_session_params = models.OAuthRequest(account=account, state=state, nonce=nonce, access_msg=access_msg, redirect_msg=auth_url, expired_at=expired_at, refresh_lifetime=refresh_lifetime, ip=ip) oauth_session_params.save(session=session) # If user selected authentication via web browser, a redirection # URL is returned instead of the direct URL pointing to the IdP. if not auto: auth_server = urlparse(redirect_url) auth_url = build_url('https://' + auth_server.netloc, path='auth/oidc_redirect', params=access_msg) record_counter(counters='IdP_authentication.request') record_timer(stat='IdP_authentication.request', time=time.time() - start) return auth_url except Exception: raise CannotAuthenticate(traceback.format_exc()) @transactional_session def get_token_oidc(auth_query_string, ip=None, session=None): """ After Rucio User got redirected to Rucio /auth/oidc_token (or /auth/oidc_code) REST endpoints with authz code and session state encoded within the URL. These parameters are used to eventually gets user's info and tokens from IdP. :param auth_query_string: IdP redirection URL query string (AuthZ code & user session state). :param ip: IP address of the client as a string. :param session: The database session in use. :returns: One of the following tuples: ("fetchcode", <code>); ("token", <token>); ("polling", True); The result depends on the authentication strategy being used (no auto, auto, polling). """ try: start = time.time() parsed_authquery = parse_qs(auth_query_string) state = parsed_authquery["state"][0] code = parsed_authquery["code"][0] # getting oauth request params from the oauth_requests DB Table oauth_req_params = session.query(models.OAuthRequest).filter_by(state=state).first() if oauth_req_params is None: raise CannotAuthenticate("User related Rucio OIDC session could not keep " + "track of responses from outstanding requests.") # NOQA: W503 req_url = urlparse(oauth_req_params.redirect_msg) issuer = 'https://' + req_url.netloc + '/' req_params = parse_qs(req_url.query) for key in req_params: req_params[key] = val_to_space_sep_str(req_params[key]) oidc_client = __get_init_oidc_client(issuer=issuer, code=code, **req_params)['client'] record_counter(counters='IdP_authentication.code_granted') # exchange access code for a access token oidc_tokens = oidc_client.do_access_token_request(state=state, request_args={"code": code}, authn_method="client_secret_basic") if 'error' in oidc_tokens: raise CannotAuthorize(oidc_tokens['error']) # mitigate replay attacks nonce = oauth_req_params.nonce if oidc_tokens['id_token']['nonce'] != nonce: raise CannotAuthenticate("ID token could not be associated with the Rucio OIDC Client" + " session. 
This points to possible replay attack !") # NOQA: W503 # starting to fill dictionary with parameters for token DB row jwt_row_dict, extra_dict = {}, {} jwt_row_dict['identity'] = oidc_identity_string(oidc_tokens['id_token']['sub'], oidc_tokens['id_token']['iss']) jwt_row_dict['account'] = oauth_req_params.account if jwt_row_dict['account'].external == 'webui': try: jwt_row_dict['account'] = get_default_account(jwt_row_dict['identity'], IdentityType.OIDC, True, session=session) except Exception: return {'webhome': None, 'token': None} # check if given account has the identity registered if not exist_identity_account(jwt_row_dict['identity'], IdentityType.OIDC, jwt_row_dict['account'], session=session): raise CannotAuthenticate("OIDC identity '%s' of the '%s' account is unknown to Rucio." % (jwt_row_dict['identity'], str(jwt_row_dict['account']))) record_counter(counters='IdP_authentication.success') # get access token expiry timestamp jwt_row_dict['lifetime'] = datetime.utcnow() + timedelta(seconds=oidc_tokens['expires_in']) # get audience and scope info from the token if 'scope' in oidc_tokens and 'audience' in oidc_tokens: jwt_row_dict['authz_scope'] = val_to_space_sep_str(oidc_tokens['scope']) jwt_row_dict['audience'] = val_to_space_sep_str(oidc_tokens['audience']) elif 'access_token' in oidc_tokens: try: values = __get_keyvalues_from_claims(oidc_tokens['access_token'], ['scope', 'aud']) jwt_row_dict['authz_scope'] = values['scope'] jwt_row_dict['audience'] = values['aud'] except Exception: # we assume the Identity Provider did not do the right job here jwt_row_dict['authz_scope'] = None jwt_row_dict['audience'] = None # groups = oidc_tokens['id_token']['groups'] # nothing done with group info for the moment - TO-DO ! # collect extra token DB row parameters extra_dict = {} extra_dict['ip'] = ip extra_dict['state'] = state # In case user requested to grant Rucio a refresh token, # this token will get saved in the DB and an automatic refresh # for a specified period of time will be initiated (done by the Rucio daemon). if 'refresh_token' in oidc_tokens: extra_dict['refresh_token'] = oidc_tokens['refresh_token'] extra_dict['refresh'] = True try: extra_dict['refresh_lifetime'] = int(oauth_req_params.refresh_lifetime) except Exception: extra_dict['refresh_lifetime'] = REFRESH_LIFETIME_H try: values = __get_keyvalues_from_claims(oidc_tokens['refresh_token'], ['exp']) exp = values['exp'] extra_dict['refresh_expired_at'] = datetime.utcfromtimestamp(float(exp)) except Exception: # 4 day expiry period by default extra_dict['refresh_expired_at'] = datetime.utcnow() + timedelta(hours=REFRESH_LIFETIME_H) new_token = __save_validated_token(oidc_tokens['access_token'], jwt_row_dict, extra_dict=extra_dict, session=session) record_counter(counters='IdP_authorization.access_token.saved') if 'refresh_token' in oidc_tokens: record_counter(counters='IdP_authorization.refresh_token.saved') # In case authentication via browser was requested, # we save the token in the oauth_requests table if oauth_req_params.access_msg: # If Rucio Client waits for a fetchcode, we save the token under this code in the DB. 
if 'http' not in oauth_req_params.access_msg: if '_polling' not in oauth_req_params.access_msg: fetchcode = rndstr(50) session.query(models.OAuthRequest).filter(models.OAuthRequest.state == state)\ .update({models.OAuthRequest.access_msg: fetchcode, models.OAuthRequest.redirect_msg: new_token['token']}) # If Rucio Client was requested to poll the Rucio Auth server # for a token automatically, we save the token under a access_msg. else: session.query(models.OAuthRequest).filter(models.OAuthRequest.state == state)\ .update({models.OAuthRequest.access_msg: oauth_req_params.access_msg, models.OAuthRequest.redirect_msg: new_token['token']}) session.commit() if '_polling' in oauth_req_params.access_msg: return {'polling': True} elif 'http' in oauth_req_params.access_msg: return {'webhome': oauth_req_params.access_msg, 'token': new_token} else: return {'fetchcode': fetchcode} else: return {'token': new_token} record_timer(stat='IdP_authorization', time=time.time() - start) except Exception: # TO-DO catch different exceptions - InvalidGrant etc. ... record_counter(counters='IdP_authorization.access_token.exception') return None # raise CannotAuthenticate(traceback.format_exc()) @transactional_session def __get_admin_token_oidc(account, req_scope, req_audience, issuer, session=None): """ Get a token for Rucio application to act on behalf of itself. client_credential flow is used for this purpose. No refresh token is expected to be used. :param account: the Rucio Admin account name to be used (InternalAccount object expected) :param req_scope: the audience requested for the Rucio client's token :param req_audience: the scope requested for the Rucio client's token :param issuer: the Identity Provider nickname or the Rucio instance in use :param session: The database session in use. :returns: A dict with token and expires_at entries. """ try: oidc_client = OIDC_ADMIN_CLIENTS[issuer] args = {"client_id": oidc_client.client_id, "client_secret": oidc_client.client_secret, "grant_type": "client_credentials", "scope": req_scope, "audience": req_audience} # in the future should use oauth2 pyoidc client (base) instead oidc_tokens = oidc_client.do_any(request=CCAccessTokenRequest, request_args=args, response=AccessTokenResponse) if 'error' in oidc_tokens: raise CannotAuthorize(oidc_tokens['error']) record_counter(counters='IdP_authentication.rucio_admin_token_granted') # save the access token in the Rucio DB if 'access_token' in oidc_tokens: validate_dict = __get_rucio_jwt_dict(oidc_tokens['access_token'], account=account, session=session) if validate_dict: record_counter(counters='IdP_authentication.success') new_token = __save_validated_token(oidc_tokens['access_token'], validate_dict, extra_dict={}, session=session) record_counter(counters='IdP_authorization.access_token.saved') return new_token return None # raise RucioException("Rucio could not get a valid admin token from the Identity Provider.") return None # raise RucioException("Rucio could not get its admin access token from the Identity Provider.") except Exception: # TO-DO catch different exceptions - InvalidGrant etc. ... record_counter(counters='IdP_authorization.access_token.exception') return None # raise CannotAuthenticate(traceback.format_exc()) @read_session def __get_admin_account_for_issuer(session=None): """ Gets admin account for the IdP issuer :returns : dictionary { 'issuer_1': (account, identity), ... 
} """ issuer_account_dict = {} for issuer in OIDC_ADMIN_CLIENTS: admin_identity = oidc_identity_string(OIDC_ADMIN_CLIENTS[issuer].client_id, issuer) admin_account = session.query(models.IdentityAccountAssociation)\ .filter_by(identity_type=IdentityType.OIDC, identity=admin_identity).first() issuer_account_dict[issuer] = (admin_account.account, admin_identity) return issuer_account_dict @transactional_session def get_token_for_account_operation(account, req_audience=None, req_scope=None, admin=False, session=None): """ Looks-up a JWT token with the required scope and audience claims with the account OIDC issuer. If tokens are found, and none contains the requested audience and scope a new token is requested (via token exchange or client credential grants in case admin = True) :param account: Rucio account name in order to lookup the issuer and corresponding valid tokens :param req_audience: audience required to be present in the token (e.g. 'fts:atlas') :param req_scope: scope requested to be present in the token (e.g. fts:submit-transfer) :param admin: If True tokens will be requested for the Rucio admin root account, preferably with the same issuer as the requesting account OIDC identity :param session: DB session in use :return: token dictionary or None, throws an exception in case of problems """ try: if not req_scope: req_scope = EXPECTED_OIDC_SCOPE if not req_audience: req_audience = EXPECTED_OIDC_AUDIENCE # get all identities for the corresponding account identities_list = session.query(models.IdentityAccountAssociation.identity) \ .filter(models.IdentityAccountAssociation.identity_type == IdentityType.OIDC, models.IdentityAccountAssociation.account == account).all() identities = [] for identity in identities_list: identities.append(identity[0]) # get all active/valid OIDC tokens account_tokens = session.query(models.Token).filter(models.Token.identity.in_(identities), models.Token.account == account, models.Token.expired_at > datetime.utcnow()).with_for_update(skip_locked=True).all() # for Rucio Admin account we ask IdP for a token via client_credential grant # for each user account OIDC identity there is an OIDC issuer that must be, by construction, # supported by Rucio server (have OIDC admin client registered as well) # that is why we take the issuer of the account identity that has an active/valid token # and look for admin account identity which has this issuer assigned # requestor should always have at least one active subject token unless it is root # this is why we first discover if the requestor is root or not get_token_for_adminacc = False admin_identity = None admin_issuer = None admin_iss_acc_idt_dict = __get_admin_account_for_issuer(session=session) # check if preferred issuer exists - if multiple present last one is taken preferred_issuer = None for token in account_tokens: preferred_issuer = token.identity.split(", ")[1].split("=")[1] # loop through all OIDC identities registerd for the account of the requestor for identity in identities: issuer = identity.split(", ")[1].split("=")[1] # compare the account of the requestor with the account of the admin if account == admin_iss_acc_idt_dict[issuer][0]: # take first matching case which means root is requesting OIDC authentication admin_identity = admin_iss_acc_idt_dict[issuer][1] if preferred_issuer and preferred_issuer != issuer: continue else: admin_issuer = issuer get_token_for_adminacc = True break # Rucio admin account requesting OIDC token if get_token_for_adminacc: # openid scope is not supported for 
client_credentials auth flow - removing it if being asked for if 'openid' in req_scope: req_scope = req_scope.replace("openid", "").strip() # checking if there is not already a token to use admin_account_tokens = session.query(models.Token).filter(models.Token.account == account, models.Token.expired_at > datetime.utcnow()).all() for admin_token in admin_account_tokens: if hasattr(admin_token, 'audience') and hasattr(admin_token, 'oidc_scope') and\ all_oidc_req_claims_present(admin_token.oidc_scope, admin_token.audience, req_scope, req_audience): return token_dictionary(admin_token) # if not found request a new one new_admin_token = __get_admin_token_oidc(account, req_scope, req_audience, admin_issuer, session=session) return new_admin_token # Rucio server requests Rucio user to be represented by Rucio admin OIDC identity if admin and not get_token_for_adminacc: # we require any other account than admin to have valid OIDC token in the Rucio DB if not account_tokens: return None # we also require that these tokens at least one has the Rucio scopes and audiences valid_subject_token_exists = False for account_token in account_tokens: if all_oidc_req_claims_present(account_token.oidc_scope, account_token.audience, EXPECTED_OIDC_SCOPE, EXPECTED_OIDC_AUDIENCE): valid_subject_token_exists = True if not valid_subject_token_exists: return None # openid scope is not supported for client_credentials auth flow - removing it if being asked for if 'openid' in req_scope: req_scope = req_scope.replace("openid", "").strip() admin_account = None for account_token in account_tokens: # for each valid account token in the DB we need to check if a valid root token does not exist with the required # scope and audience admin_issuer = account_token.identity.split(", ")[1].split("=")[1] # assuming the requesting account is using Rucio supported IdPs, we check if any token of this admin identity # has already a token with the requested scopes and audiences admin_acc_idt_tuple = admin_iss_acc_idt_dict[admin_issuer] admin_account = admin_acc_idt_tuple[0] admin_identity = admin_acc_idt_tuple[1] admin_account_tokens = session.query(models.Token).filter(models.Token.identity == admin_identity, models.Token.account == admin_account, models.Token.expired_at > datetime.utcnow()).all() for admin_token in admin_account_tokens: if hasattr(admin_token, 'audience') and hasattr(admin_token, 'oidc_scope') and\ all_oidc_req_claims_present(admin_token.oidc_scope, admin_token.audience, req_scope, req_audience): return token_dictionary(admin_token) # if no admin token existing was found for the issuer of the valid user token # we request a new one new_admin_token = __get_admin_token_oidc(admin_account, req_scope, req_audience, admin_issuer, session=session) return new_admin_token # Rucio server requests exchange token for a Rucio user if not admin and not get_token_for_adminacc: # we require any other account than admin to have valid OIDC token in the Rucio DB if not account_tokens: return None # we also require that these tokens at least one has the Rucio scopes and audiences valid_subject_token_exists = False for account_token in account_tokens: if all_oidc_req_claims_present(account_token.oidc_scope, account_token.audience, EXPECTED_OIDC_SCOPE, EXPECTED_OIDC_AUDIENCE): valid_subject_token_exists = True if not valid_subject_token_exists: return None subject_token = None for token in account_tokens: if hasattr(token, 'audience') and hasattr(token, 'oidc_scope'): if all_oidc_req_claims_present(token.oidc_scope, token.audience, 
req_scope, req_audience): return token_dictionary(token) # from available tokens select preferentially the one which are being refreshed if hasattr(token, 'oidc_scope') and ('offline_access' in str(token['oidc_scope'])): subject_token = token # if not proceed with token exchange if not subject_token: subject_token = random.choice(account_tokens) exchanged_token = __exchange_token_oidc(subject_token, scope=req_scope, audience=req_audience, identity=subject_token.identity, refresh_lifetime=subject_token.refresh_lifetime, account=account, session=session) return exchanged_token return None except Exception: # raise CannotAuthorize(traceback.format_exc(), type(account), account) return None @transactional_session def __exchange_token_oidc(subject_token_object, session=None, **kwargs): """ Exchanged an access_token for a new one with different scope &/ audience providing that the scope specified is registered with IdP for the Rucio OIDC Client and the Rucio user has this scope linked to the subject token presented for the token exchange. :param subject_token_object: DB subject token to be exchanged :param kwargs: 'scope', 'audience', 'grant_type', 'ip' and 'account' doing the exchange :param session: The database session in use. :returns: A dict with token and expires_at entries. """ grant_type = kwargs.get('grant_type', EXCHANGE_GRANT_TYPE) jwt_row_dict, extra_dict = {}, {} jwt_row_dict['account'] = kwargs.get('account', '') jwt_row_dict['authz_scope'] = kwargs.get('scope', '') jwt_row_dict['audience'] = kwargs.get('audience', '') jwt_row_dict['identity'] = kwargs.get('identity', '') extra_dict['ip'] = kwargs.get('ip', None) # if subject token has offline access scope but *no* refresh token in the DB # (happens when user presents subject token acquired from other sources then Rucio CLI mechanism), # add offline_access scope to the token exchange request ! 
if 'offline_access' in str(subject_token_object.oidc_scope) and not subject_token_object.refresh_token: jwt_row_dict['authz_scope'] += ' offline_access' if not grant_type: grant_type = EXCHANGE_GRANT_TYPE try: start = time.time() record_counter(counters='IdP_authentication.code_granted') oidc_dict = __get_init_oidc_client(token_object=subject_token_object, token_type="subject_token") oidc_client = oidc_dict['client'] args = {"subject_token": subject_token_object.token, "scope": jwt_row_dict['authz_scope'], "audience": jwt_row_dict['audience'], "grant_type": grant_type} # exchange , access token for a new one oidc_token_response = oidc_dict['client'].do_any(Message, endpoint=oidc_client.provider_info["token_endpoint"], state=oidc_dict['state'], request_args=args, authn_method="client_secret_basic") oidc_tokens = oidc_token_response.json() if 'error' in oidc_tokens: raise CannotAuthorize(oidc_tokens['error']) # get audience and scope information if 'scope' in oidc_tokens and 'audience' in oidc_tokens: jwt_row_dict['authz_scope'] = val_to_space_sep_str(oidc_tokens['scope']) jwt_row_dict['audience'] = val_to_space_sep_str(oidc_tokens['audience']) elif 'access_token' in oidc_tokens: values = __get_keyvalues_from_claims(oidc_tokens['access_token'], ['scope', 'aud']) jwt_row_dict['authz_scope'] = values['scope'] jwt_row_dict['audience'] = values['aud'] jwt_row_dict['lifetime'] = datetime.utcnow() + timedelta(seconds=oidc_tokens['expires_in']) if 'refresh_token' in oidc_tokens: extra_dict['refresh_token'] = oidc_tokens['refresh_token'] extra_dict['refresh'] = True extra_dict['refresh_lifetime'] = kwargs.get('refresh_lifetime', REFRESH_LIFETIME_H) if extra_dict['refresh_lifetime'] is None: extra_dict['refresh_lifetime'] = REFRESH_LIFETIME_H try: values = __get_keyvalues_from_claims(oidc_tokens['refresh_token'], ['exp']) extra_dict['refresh_expired_at'] = datetime.utcfromtimestamp(float(values['exp'])) except Exception: # 4 day expiry period by default extra_dict['refresh_expired_at'] = datetime.utcnow() + timedelta(hours=REFRESH_LIFETIME_H) new_token = __save_validated_token(oidc_tokens['access_token'], jwt_row_dict, extra_dict=extra_dict, session=session) record_counter(counters='IdP_authorization.access_token.saved') if 'refresh_token' in oidc_tokens: record_counter(counters='IdP_authorization.refresh_token.saved') record_timer(stat='IdP_authorization.token_exchange', time=time.time() - start) return new_token except Exception: # raise CannotAuthorize(traceback.format_exc()) return None @transactional_session def __change_refresh_state(token, refresh=False, session=None): """ Changes token refresh state to True/False. :param token: the access token for which the refresh value should be changed. """ try: if refresh: # update refresh column for a token to True session.query(models.Token).filter(models.Token.token == token)\ .update({models.Token.refresh: True}) else: session.query(models.Token).filter(models.Token.token == token)\ .update({models.Token.refresh: False, models.Token.refresh_expired_at: datetime.utcnow()}) session.commit() except Exception as error: raise RucioException(error.args) @transactional_session def refresh_cli_auth_token(token_string, account, session=None): """ Checks if there is active refresh token and if so returns either active token with expiration timestamp or requests a new refresh and returns new access token. 
:param token_string: token string :param account: Rucio account for which token refresh should be considered :return: tuple of (access token, expiration epoch), None otherswise """ # only validated tokens are in the DB, check presence of token_string account_token = session.query(models.Token) \ .filter(models.Token.token == token_string, models.Token.account == account, models.Token.expired_at > datetime.utcnow()) \ .with_for_update(skip_locked=True).first() # if token does not exist in the DB, return None if account_token is None: return None # protection (!) no further action should be made # for token_string without refresh_token in the DB ! if account_token.refresh_token is None: return None # if the token exists, check if it was refreshed already, if not, refresh it if account_token.refresh: # protection (!) returning the same token if the token_string # is a result of a refresh which happened in the last 5 min datetime_min_ago = datetime.utcnow() - timedelta(seconds=300) if account_token.updated_at > datetime_min_ago: epoch_exp = int(floor((account_token.expired_at - datetime(1970, 1, 1)).total_seconds())) new_token_string = account_token.token return new_token_string, epoch_exp # asking for a refresh of this token new_token = __refresh_token_oidc(account_token, session=session) new_token_string = new_token['token'] epoch_exp = int(floor((new_token['expires_at'] - datetime(1970, 1, 1)).total_seconds())) return new_token_string, epoch_exp else: # find account token with the same scope, # audience and has a valid refresh token new_token = session.query(models.Token) \ .filter(models.Token.refresh == true(), models.Token.refresh_expired_at > datetime.utcnow(), models.Token.account == account, models.Token.expired_at > datetime.utcnow()) \ .with_for_update(skip_locked=True).first() if new_token is None: return None # if the new_token has same audience and scopes as the original # account_token --> return this token and exp timestamp to the user if all_oidc_req_claims_present(new_token.oidc_scope, new_token.audience, account_token.oidc_scope, account_token.audience): epoch_exp = int(floor((new_token.expired_at - datetime(1970, 1, 1)).total_seconds())) new_token_string = new_token.token return new_token_string, epoch_exp # if scopes and audience are not the same, return None return None @transactional_session def refresh_jwt_tokens(total_workers, worker_number, refreshrate=3600, limit=1000, session=None): """ Refreshes tokens which expired or will expire before (now + refreshrate) next run of this function and which have valid refresh token. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum number of tokens to refresh per call. :param session: Database session in use. 
:return: numper of tokens refreshed """ nrefreshed = 0 try: # get tokens for refresh that expire in the next <refreshrate> seconds expiration_future = datetime.utcnow() + timedelta(seconds=refreshrate) query = session.query(models.Token.token) \ .filter(and_(models.Token.refresh == true(), models.Token.refresh_expired_at > datetime.utcnow(), models.Token.expired_at < expiration_future))\ .order_by(models.Token.expired_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='token') # limiting the number of tokens for refresh query = query.limit(limit) filtered_tokens = [] for items in session.execute(query).partitions(10): tokens = tuple(map(lambda row: row.token, items)) filtered_tokens += session.query(models.Token) \ .filter(models.Token.token.in_(tokens)) \ .with_for_update(skip_locked=True) \ .all() # refreshing these tokens for token in filtered_tokens: new_token = __refresh_token_oidc(token, session=session) if new_token: nrefreshed += 1 except Exception as error: raise RucioException(error.args) return nrefreshed @transactional_session def __refresh_token_oidc(token_object, session=None): """ Requests new access and refresh tokens from the Identity Provider. Assumption: The Identity Provider issues refresh tokens for one time use only and with a limited lifetime. The refresh tokens are invalidated no matter which of these situations happens first. :param token_object: Rucio models.Token DB row object :returns: A dict with token and expires_at entries if all went OK, None if refresh was not possible due to token invalidity or refresh lifetime constraints. Otherwise, throws an an Exception. """ try: start = time.time() record_counter(counters='IdP_authorization.refresh_token.request') jwt_row_dict, extra_dict = {}, {} jwt_row_dict['account'] = token_object.account jwt_row_dict['identity'] = token_object.identity extra_dict['refresh_start'] = datetime.utcnow() # check if refresh token started in the past already if hasattr(token_object, 'refresh_start'): if token_object.refresh_start: extra_dict['refresh_start'] = token_object.refresh_start # check if refresh lifetime is set for the token extra_dict['refresh_lifetime'] = REFRESH_LIFETIME_H if token_object.refresh_lifetime: extra_dict['refresh_lifetime'] = token_object.refresh_lifetime # if the token has been refreshed for time exceeding # the refresh_lifetime, the attempt will be aborted and refresh stopped if datetime.utcnow() - extra_dict['refresh_start'] > timedelta(hours=extra_dict['refresh_lifetime']): __change_refresh_state(token_object.token, refresh=False, session=session) return None oidc_dict = __get_init_oidc_client(token_object=token_object, token_type="refresh_token") oidc_client = oidc_dict['client'] # getting a new refreshed set of tokens state = oidc_dict['state'] oidc_tokens = oidc_client.do_access_token_refresh(state=state) if 'error' in oidc_tokens: raise CannotAuthorize(oidc_tokens['error']) record_counter(counters='IdP_authorization.refresh_token.refreshed') # get audience and scope information if 'scope' in oidc_tokens and 'audience' in oidc_tokens: jwt_row_dict['authz_scope'] = val_to_space_sep_str(oidc_tokens['scope']) jwt_row_dict['audience'] = val_to_space_sep_str(oidc_tokens['audience']) elif 'access_token' in oidc_tokens: values = __get_keyvalues_from_claims(oidc_tokens['access_token'], ['scope', 'aud']) jwt_row_dict['authz_scope'] = values['scope'] jwt_row_dict['audience'] = values['aud'] # save new access and refresh tokens in the DB 
if 'refresh_token' in oidc_tokens and 'access_token' in oidc_tokens: # aborting refresh of the original token # (keeping it in place until it expires) __change_refresh_state(token_object.token, refresh=False, session=session) # get access token expiry timestamp jwt_row_dict['lifetime'] = datetime.utcnow() + timedelta(seconds=oidc_tokens['expires_in']) extra_dict['refresh'] = True extra_dict['refresh_token'] = oidc_tokens['refresh_token'] try: values = __get_keyvalues_from_claims(oidc_tokens['refresh_token'], ['exp']) extra_dict['refresh_expired_at'] = datetime.utcfromtimestamp(float(values['exp'])) except Exception: # 4 day expiry period by default extra_dict['refresh_expired_at'] = datetime.utcnow() + timedelta(hours=REFRESH_LIFETIME_H) new_token = __save_validated_token(oidc_tokens['access_token'], jwt_row_dict, extra_dict=extra_dict, session=session) record_counter(counters='IdP_authorization.access_token.saved') record_counter(counters='IdP_authorization.refresh_token.saved') else: raise CannotAuthorize("OIDC identity '%s' of the '%s' account is did not " % (token_object.identity, token_object.account) + "succeed requesting a new access and refresh tokens.") # NOQA: W503 record_timer(stat='IdP_authorization.refresh_token', time=time.time() - start) return new_token except Exception: record_counter(counters='IdP_authorization.refresh_token.exception') raise CannotAuthorize(traceback.format_exc()) @transactional_session def delete_expired_oauthrequests(total_workers, worker_number, limit=1000, session=None): """ Delete expired OAuth request parameters. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum number of oauth request session parameters to delete. :param session: Database session in use. :returns: number of deleted rows """ try: # get expired OAuth request parameters query = session.query(models.OAuthRequest.state).filter(models.OAuthRequest.expired_at < datetime.utcnow())\ .order_by(models.OAuthRequest.expired_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='state') # limiting the number of oauth requests deleted at once query = query.limit(limit) ndeleted = 0 for items in session.execute(query).partitions(10): states = tuple(map(lambda row: row.state, items)) ndeleted += session.query(models.OAuthRequest) \ .filter(models.OAuthRequest.state.in_(states)) \ .with_for_update(skip_locked=True) \ .delete(synchronize_session='fetch') except Exception as error: raise RucioException(error.args) return ndeleted def __get_keyvalues_from_claims(token, keys=None): """ Extracting claims from token, e.g. scope and audience. :param token: the JWT to be unpacked :param key: list of key names to extract from the token claims :returns: The list of unicode values under the key, throws an exception otherwise. """ resdict = {} try: claims = JWT().unpack(token).payload() if not keys: keys = claims.keys() for key in keys: value = '' if key in claims: value = val_to_space_sep_str(claims[key]) resdict[key] = value return resdict except Exception: raise CannotAuthenticate(traceback.format_exc()) @read_session def __get_rucio_jwt_dict(jwt, account=None, session=None): """ Get a Rucio token dictionary from token claims. Check token expiration and find default Rucio account for token identity. 
:param jwt: JSON Web Token to be inspected :param session: DB session in use :returns: Rucio token dictionary, None otherwise """ try: # getting token paylod token_payload = __get_keyvalues_from_claims(jwt) identity_string = oidc_identity_string(token_payload['sub'], token_payload['iss']) expiry_date = datetime.utcfromtimestamp(float(token_payload['exp'])) if expiry_date < datetime.utcnow(): # check if expired return None scope = None audience = None if 'scope' in token_payload: scope = val_to_space_sep_str(token_payload['scope']) if 'aud' in token_payload: audience = val_to_space_sep_str(token_payload['aud']) if not account: # this assumes token has been previously looked up in DB # before to be sure that we do not have the right account already in the DB ! account = get_default_account(identity_string, IdentityType.OIDC, True, session=session) else: if not exist_identity_account(identity_string, IdentityType.OIDC, account, session=session): return None value = {'account': account, 'identity': identity_string, 'lifetime': expiry_date, 'audience': audience, 'authz_scope': scope} return value except Exception: return None @transactional_session def __save_validated_token(token, valid_dict, extra_dict=None, session=None): """ Save JWT token to the Rucio DB. :param token: Authentication token as a variable-length string. :param valid_dict: Validation Rucio dictionary as the output of the __get_rucio_jwt_dict function :raises RucioException: on any error :returns: A dict with token and expires_at entries. """ try: if not extra_dict: extra_dict = {} new_token = models.Token(account=valid_dict.get('account', None), token=token, oidc_scope=valid_dict.get('authz_scope', None), expired_at=valid_dict.get('lifetime', None), audience=valid_dict.get('audience', None), identity=valid_dict.get('identity', None), refresh=extra_dict.get('refresh', False), refresh_token=extra_dict.get('refresh_token', None), refresh_expired_at=extra_dict.get('refresh_expired_at', None), refresh_lifetime=extra_dict.get('refresh_lifetime', None), refresh_start=extra_dict.get('refresh_start', None), ip=extra_dict.get('ip', None)) new_token.save(session=session) return token_dictionary(new_token) except Exception as error: raise RucioException(error.args) @transactional_session def validate_jwt(json_web_token, session=None): """ Verifies signature and validity of a JSON Web Token. Gets the issuer public keys from the oidc_client and verifies the validity of the token. Used only for external tokens, not known to Rucio DB. :param json_web_token: the JWT string to verify :returns: dictionary { account: <account name>, identity: <identity>, lifetime: <token lifetime>, audience: <audience>, authz_scope: <authz_scope> } if successful, None otherwise. """ try: # getting issuer from the token payload token_dict = __get_rucio_jwt_dict(json_web_token, session=session) if not token_dict: return None issuer = token_dict['identity'].split(", ")[1].split("=")[1] oidc_client = OIDC_CLIENTS[issuer] issuer_keys = oidc_client.keyjar.get_issuer_keys(issuer) JWS().verify_compact(json_web_token, issuer_keys) # if there is no audience and scope information, # try to get it from IdP introspection endpoint # TO-BE-REMOVED - once all IdPs support scope and audience in token claims !!! 
if not token_dict['authz_scope'] or not token_dict['audience']: clprocess = subprocess.Popen(['curl', '-s', '-L', '-u', '%s:%s' % (oidc_client.client_id, oidc_client.client_secret), '-d', 'token=%s' % (json_web_token), oidc_client.introspection_endpoint], shell=False, stdout=subprocess.PIPE) inspect_claims = json.loads(clprocess.communicate()[0]) try: token_dict['audience'] = inspect_claims['aud'] token_dict['authz_scope'] = inspect_claims['scope'] except: pass record_counter(counters='JSONWebToken.valid') # if token is valid and coming from known issuer --> check aud and scope and save it if unknown if token_dict['authz_scope'] and token_dict['audience']: if all_oidc_req_claims_present(token_dict['authz_scope'], token_dict['audience'], EXPECTED_OIDC_SCOPE, EXPECTED_OIDC_AUDIENCE): # save the token in Rucio DB giving the permission to use it for Rucio operations __save_validated_token(json_web_token, token_dict, session=session) else: return None else: return None record_counter(counters='JSONWebToken.saved') return token_dict except Exception: record_counter(counters='JSONWebToken.invalid') return None def oidc_identity_string(sub, iss): """ Transform IdP sub claim and issuers url into users identity string. :param sub: users SUB claim from the Identity Provider :param iss: issuer (IdP) https url :returns: OIDC identity string "SUB=<usersid>, ISS=https://iam-test.ch/" """ return 'SUB=' + str(sub) + ', ISS=' + str(iss) def token_dictionary(token: models.Token): return {'token': token.token, 'expires_at': token.expired_at}
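
# Illustrative sketch (added for clarity, not part of the original module): how the
# helpers above fit together when an externally issued JWT arrives. The claim
# values shown ('abc-123' and the issuer URL) are made-up placeholders.
#
#   payload = __get_keyvalues_from_claims(jwt, ['sub', 'iss', 'exp'])
#   identity = oidc_identity_string(payload['sub'], payload['iss'])
#   # -> "SUB=abc-123, ISS=https://iam-test.ch/"
#   token_info = validate_jwt(jwt, session=session)  # None if signature or claims fail
#   # on success the token is persisted via __save_validated_token() and
#   # token_dictionary() exposes it as {'token': ..., 'expires_at': ...}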
the-stack_106_30774
import sys
import os
import json
import xml.etree.ElementTree as ET

START_BOUNDING_BOX_ID = 1
PRE_DEFINE_CATEGORIES = {}  # If necessary, pre-define category and its id
# PRE_DEFINE_CATEGORIES = {"aeroplane": 1, "bicycle": 2, "bird": 3, "boat": 4,
#                          "bottle":5, "bus": 6, "car": 7, "cat": 8, "chair": 9,
#                          "cow": 10, "diningtable": 11, "dog": 12, "horse": 13,
#                          "motorbike": 14, "person": 15, "pottedplant": 16,
#                          "sheep": 17, "sofa": 18, "train": 19, "tvmonitor": 20}
PRE_DEFINE_CATEGORIES = {"car": 0}


def get_xml_list(xml_dir, txt_path ='xml_list.txt'):
    xml_lists = os.listdir(xml_dir)
    f = open(txt_path, 'w')
    for xml_Subpath in xml_lists:
        f.write(xml_Subpath+"\n")
    f.close()


def get(root, name):
    vars = root.findall(name)
    return vars


def get_and_check(root, name, length):
    vars = root.findall(name)
    if len(vars) == 0:
        raise NotImplementedError('Can not find %s in %s.'%(name, root.tag))
    if length > 0 and len(vars) != length:
        raise NotImplementedError('The size of %s is supposed to be %d, but is %d.'%(name, length, len(vars)))
    if length == 1:
        vars = vars[0]
    return vars


def get_filename_as_int(filename):
    try:
        filename = filename.split('.')[0].strip()
        filename = str(filename)
        return filename
    except:
        raise NotImplementedError('Filename %s is supposed to be an integer.'%(filename))


def convert(anno_dirs, json_file):
    json_dict = {"images":[], "type": "instances", "annotations": [], "categories": []}
    categories = PRE_DEFINE_CATEGORIES
    bnd_id = START_BOUNDING_BOX_ID
    for anno_dir in os.listdir(anno_dirs):
        anno_dir_path = os.path.join(anno_dirs, anno_dir)
        for xml_name in os.listdir(anno_dir_path):
            xml_file_path = os.path.join(anno_dir_path, xml_name)
            print("Processing %s"%(xml_file_path))
            tree = ET.parse(xml_file_path)
            root = tree.getroot()
            filename = get_and_check(root, 'filename', 1).text
            filename = anno_dir + '_' + filename
            ## The filename must be a number
            image_id = get_filename_as_int(filename)
            size = get_and_check(root, 'size', 1)
            width = int(get_and_check(size, 'width', 1).text)
            height = int(get_and_check(size, 'height', 1).text)
            image = {'file_name': filename, 'height': height, 'width': width, 'id':image_id}
            json_dict['images'].append(image)
            ## Currently we do not support segmentation
            #  segmented = get_and_check(root, 'segmented', 1).text
            #  assert segmented == '0'
            for obj in get(root, 'object'):
                difficult = bool(int(get_and_check(obj, 'difficult', 1).text))
                if difficult:
                    print(difficult)
                    print('difficult')
                    continue
                category = 'car'  # do not read the category from the XML here; it is fixed to 'car'
                #assert category in categories
                category_id = categories[category]
                bndbox = get_and_check(obj, 'bndbox', 1)
                xmin = int(get_and_check(bndbox, 'xmin', 1).text) - 1
                ymin = int(get_and_check(bndbox, 'ymin', 1).text) - 1
                xmax = int(get_and_check(bndbox, 'xmax', 1).text)
                ymax = int(get_and_check(bndbox, 'ymax', 1).text)
                assert(xmax > xmin)
                assert(ymax > ymin)
                o_width = abs(xmax - xmin)
                o_height = abs(ymax - ymin)
                ann = {'area': o_width*o_height, 'iscrowd': 0, 'image_id': image_id, 'bbox':[xmin, ymin, o_width, o_height], 'category_id': category_id, 'id': bnd_id, 'ignore': 0, 'segmentation': []}
                json_dict['annotations'].append(ann)
                bnd_id = bnd_id + 1

    for cate, cid in categories.items():
        cat = {'supercategory': 'none', 'id': cid, 'name': cate}
        json_dict['categories'].append(cat)
    json_fp = open(json_file, 'w')
    json_str = json.dumps(json_dict)
    json_fp.write(json_str)
    json_fp.close()


if __name__ == '__main__':
    anno_dirs = "/media/xyl/6418a039-786d-4cd8-b0bb-1ed36a649668/Datasets/sky_challenge_competition/Annotation_test"
    convert(anno_dirs, 
"ATR_sky_test.json")
the-stack_106_30775
# -*- coding: utf-8 -*- from __future__ import print_function import os import textwrap import logging from . import cli log = logging.getLogger(__name__) def is_module(directory): """A directory is a module if it contains an ``__init__.py`` file. """ return os.path.isdir(directory) and '__init__.py' in os.listdir(directory) def is_pysource(fname): """A file name is a python source file iff it ends with '.py' and doesn't start with a dot. """ return not fname.startswith('.') and fname.endswith('.py') def fname2modname(fname, package_root): subpath = os.path.splitext(fname)[0][len(package_root):] modname = subpath.lstrip(os.path.sep).replace(os.path.sep, '.') return modname def python_sources_below(directory, package=True): for root, dirs, files in os.walk(directory): if package and '__init__.py' not in files: continue dotdirs = [d for d in dirs if d.startswith('.')] for d in dotdirs: dirs.remove(d) if 'migrations' in dirs: dirs.remove('migrations') for fname in files: if is_pysource(fname): # and fname not in args['exclude']: if fname == '__init__.py': yield os.path.abspath(root) else: yield os.path.abspath(os.path.join(root, fname)) class DummyModule(object): """We create a file that imports the module to be investigated. """ def __init__(self, target, **args): self._legal_mnames = {} self.target = target self.fname = '_dummy_' + target.modpath.replace('.', '_') + '.py' self.absname = os.path.join(target.workdir, self.fname) log.debug("dummy-filename: %r (%s)", self.fname, self.absname) if target.is_module: cli.verbose(1, "target is a PACKAGE") with open(self.fname, 'w') as fp: for fname in python_sources_below(target.package_root): modname = fname2modname(fname, target.syspath_dir) self.print_import(fp, modname) elif target.is_dir: # FIXME?: not sure what the intended semantics was here, as it is # this will almost certainly not do the right thing... cli.verbose(1, "target is a DIRECTORY") with open(self.fname, 'w') as fp: for fname in os.listdir(target.dirname): if is_pysource(fname): self.print_import(fp, fname2modname(fname, '')) else: assert target.is_pysource cli.verbose(1, "target is a FILE") with open(self.fname, 'w') as fp: self.print_import(fp, target.modpath) def text(self): """Return the content of the dummy module. """ return open(self.fname).read() def legal_module_name(self, name): """Legal module names are dotted strings where each part is a valid Python identifier. (and not a keyword, and support unicode identifiers in Python3, ..) 
""" if name in self._legal_mnames: return self._legal_mnames[name] for part in name.split('.'): try: exec("%s = 42" % part, {}, {}) except: # pragma: nocover self._legal_mnames[name] = False return False self._legal_mnames[name] = True return True def print_header(self, fp): # pragma: nocover # we're not executing the file in fp, so really not necessary to # catch import errors print(textwrap.dedent(""" import sys import traceback """), file=fp) def print_import(self, fp, module): if not self.legal_module_name(module): log.warning("SKIPPING ILLEGAL MODULE_NAME: %s", module) return mparts = module.rsplit('.', 1) # we're not executing the file in fp, so really not necessary to # catch import errors if len(mparts) == 1: print(textwrap.dedent("""\ import {module} """).format(module=module), file=fp) else: print(textwrap.dedent("""\ from {prefix} import {mname} """).format(prefix=mparts[0], mname=mparts[1]), file=fp) # if len(mparts) == 1: # print(textwrap.dedent("""\ # import {module} # """).format(module=module)) # else: # print(textwrap.dedent("""\ # from {prefix} import {mname} # """).format(prefix=mparts[0], mname=mparts[1]))
the-stack_106_30776
import tensorflow as tf from modelv4tiny import yolov4tiny from utils.misc_utils import parse_anchors, load_weights num_class = 80 img_size = 416 weight_path = './data/darknet_weights_v4tiny/yolov4-tiny.weights' save_path = './data/darknet_weights_v4tiny/yolov4-tiny.ckpt' anchors = parse_anchors('./data/yolo_tiny_anchors.txt') model = yolov4tiny(80, anchors) with tf.Session() as sess: inputs = tf.placeholder(tf.float32, [1, img_size, img_size, 3]) with tf.variable_scope('yolov4tiny'): feature_map = model.forward(inputs) saver = tf.train.Saver(var_list=tf.global_variables(scope='yolov4tiny')) load_ops = load_weights(tf.global_variables(scope='yolov4tiny'), weight_path) sess.run(load_ops) saver.save(sess, save_path=save_path) print('TensorFlow model checkpoint has been saved to {}'.format(save_path))
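
# Follow-up sketch (not in the original script): restoring the converted
# checkpoint later requires rebuilding the same graph first, then using the
# standard TF1 restore API. 'save_path' is the path defined above.
#
#   with tf.Session() as sess:
#       inputs = tf.placeholder(tf.float32, [1, img_size, img_size, 3])
#       with tf.variable_scope('yolov4tiny'):
#           feature_map = model.forward(inputs)
#       saver = tf.train.Saver(var_list=tf.global_variables(scope='yolov4tiny'))
#       saver.restore(sess, save_path)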
the-stack_106_30779
import matplotlib matplotlib.use('TkAgg') import matplotlib.pyplot as plt from matplotlib.animation import FuncAnimation import agentframework import agentstorage import csv import random import argparse import tkinter import requests import bs4 # Quitter function from tkinter loop # From: https://stackoverflow.com/a/55206851 def quit_me() -> None: """Quit and destroy the tkinter mainloop on closing the model GUI window.""" print('Quitting model runner!') root.quit() root.destroy() # Updates agents one by one def update(frame_number) -> None: """ Update agent position and behaviour. Parameters ---------- frame_number : int Set automatically to equal number of model iterations, only used in animation. Returns ------- None """ fig.clear() # clears scatter points from earlier iteration random.shuffle(agents) # shuffle agents to eliminate position-based advantages for a in range(num_of_agents): agents[a].eat() for a in range(num_of_agents): agents[a].move() for a in range(num_of_agents): agents[a].share_with_neighbours(neighbourhood) for a in range(num_of_agents): agents[a].share_eater() for a in range(num_of_agents): agents[a].sick() # Plot agents on a scatterplot recursively adding points onto the environment raster (only on single model run) plt.imshow(environment) for b in range(num_of_agents): plt.scatter(agents[b].x, agents[b].y) # Create and display animation, write outputs def run() -> None: """Run the model animation and record output.""" # Defining animation part with stopping at num_of_iterations and no looping animation = FuncAnimation(fig, update, interval=1, repeat=False, frames=num_of_iterations) # Only draw result when there is no parameter sweeping canvas.draw() # Write environment to outfile env_writer('out.txt') # Print overall and one-by-one agent storage, record in storage.txt agentstorage.all_storage_writer(agents, 'storage.txt') agentstorage.agent_storage_writer(agents, 'storage.txt') # Print agents agent_printer(agents) # Read environment file to nested list def env_reader(infile: str) -> list: """ Read environment list from infile. Parameters ---------- infile : str Name of the file containing the environment list. Returns ------- list Nested (2D) list of the environment. """ with open(infile, 'r') as f: reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC) # QUOTE_NONNUMERIC changes everything to float # For every row of the list, append every row element to an environment row, then append environment row to environment environment = [[value for value in row] for row in reader] return environment # Make agents based on num_of_agents def agent_maker(num_of_agents: int, environment: list, ys: list, xs: list) -> list: """ Create list of agents used for simulation. Parameters ---------- num_of_agents : int The desired number of agents, defaults to length of web agent list. environment : list Nested(2D) list of environment. ys : list List of y coordinates retrieved from the web. xs : list List of x coordinates retrieved from the web. Returns ------- list List of agentframework.Agent objects. 
""" # Create empty agents list agents = [] # For every agent, test if there is a coordinate pair from the web for it (not out of bound) for i in range(num_of_agents): if i < len(ys): y = ys[i] x = xs[i] agents.append(agentframework.Agent(i, environment, agents, y, x)) # If there is no coordinate pair (out of bounds), one is assigned randomly else: y = random.randint(0, max(ys)) x = random.randint(0, max(xs)) agents.append(agentframework.Agent(i, environment, agents, y, x)) return agents # Get agent starting coordinates from the web def web_scraper(url: str) -> tuple: """ Scrape the web for agent coordinates. Parameters ---------- url : str The URL of the agent coordinates data on the web. Returns ------- tuple Tuple of lists ([y coordinates], [x coordinates]). """ # Fetch the url and download the text the html site contains r = requests.get(url) content = r.text # Parse html text to make it selectable soup = bs4.BeautifulSoup(content, 'html.parser') # Find coordinate table table = soup.find(id='yxz') # Retrieve list of html tags containing x and y values ys_html = soup.find_all(attrs={'class': 'y'}) xs_html = soup.find_all(attrs={'class': 'x'}) # Extract and convert to integer the numbers from the tags ys_basic = [int(y.text) for y in ys_html] xs_basic = [int(x.text) for x in xs_html] # Scale the numbers to fill the environment list ys = [y * round(len(environment) / max(ys_basic)) for y in ys_basic] xs = [x * round(len(environment[0]) / max(xs_basic)) for x in xs_basic] return ys, xs # Writes environment file to out.txt def env_writer(outfile: str) -> None: """ Write environment list to text file after simulation. Parameters ---------- outfile : str Name of the output textfile. Returns ------- None """ # Write environment out at the end to a file with open(outfile, 'w') as f: writer = csv.writer(f) for line in environment: writer.writerow(line) # 3. Overwrite __str__ method of agents to print location and storage def agent_printer(agents: list) -> None: """ Print agent properties. Parameters ---------- agents : list List of agentframework.Agent objects. 
Returns ------- None """ for agent in agents: print(agent) # Create figure for animated plotting fig = plt.figure(figsize=(7, 7)) ax = fig.add_axes([0, 0, 1, 1]) ax.set_autoscale_on(False) # Does not scale automatically # Read environment list environment = env_reader('in.txt') # Scrape web for agent coordinate information ys, xs = web_scraper('https://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part9/data.html') # Create command-line functionality (needs positional arguments from command line to run) parser = argparse.ArgumentParser(description='Simulate random moving agents grazing a field and sharing food') # Add arguments parser.add_argument('--agents', help='Number of agents (integer)', type=int, required=False, default=len(ys)) parser.add_argument('--iterations', help='Number of iterations (integer)', type=int, required=False, default=100) parser.add_argument('--neighbourhood', help='Radius of agent communication zone (integer)', type=int, required=False, default=20) # Declare number of agents and iterations, along with neighbourhood size (all from argparse cmd) num_of_agents = parser.parse_args().agents num_of_iterations = parser.parse_args().iterations neighbourhood = parser.parse_args().neighbourhood # Append to agents list agents = agent_maker(num_of_agents, environment, ys, xs) # Create GUI canvas root = tkinter.Tk() root.protocol('WM_DELETE_WINDOW', quit_me) # exists program when window closed root.wm_title('Model') canvas = matplotlib.backends.backend_tkagg.FigureCanvasTkAgg(fig, master=root) # Plot fig on canvas canvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1) # Create menu with Run functionality menu_bar = tkinter.Menu(root) root.config(menu=menu_bar) model_menu = tkinter.Menu(menu_bar) menu_bar.add_cascade(label='Model', menu=model_menu) # Model button model_menu.add_command(label='Run model', command=run) # Run button in the drop-down list of Model # Initialise main loop root.mainloop()
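
# Example invocation from the command line (the script name is a placeholder for
# whatever this file is saved as; the flags come from the argparse setup above):
#
#   python model.py --agents 20 --iterations 200 --neighbourhood 30
#
# All three flags are optional; by default the number of agents equals the number
# of coordinate pairs scraped from the web page.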
the-stack_106_30780
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers to manipulate a tensor graph in python.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import copy
import re
import six

from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export

_VARIABLE_OPS = {
    "Assign",
    "AssignAdd",
    "AssignSub",
    "Queue",
    "ScatterAdd",
    "ScatterSub",
    "ScatterUpdate",
    "TruncatedNormal",
    "Variable",
    "VariableV2",
}


def _is_variable_op(op):
  """Returns true if 'op' refers to a Variable node."""
  return op in _VARIABLE_OPS


@deprecation.deprecated(
    date=None,
    instructions="Use `tf.compat.v1.graph_util.must_run_on_cpu`")
@tf_export(v1=["graph_util.must_run_on_cpu"])
def must_run_on_cpu(node, pin_variables_on_cpu=False):
  """Returns True if the given node_def must run on CPU, otherwise False.

  Args:
    node: The node to be assigned to a device. Could be either an ops.Operation
      or NodeDef.
    pin_variables_on_cpu: If True, this function will return True if node_def
      represents a variable-related op.

  Returns:
    True if the given node must run on CPU, otherwise False.
  """

  if isinstance(node, ops.Operation):
    node_def = node.node_def
  else:
    assert isinstance(node, node_def_pb2.NodeDef)
    node_def = node

  # If the op is a variable-related op, should we pin it on CPU?
  if pin_variables_on_cpu and _is_variable_op(node_def.op):
    return True

  # Constant operations producing a string or int32 must run on CPU.
  if node_def.op == "Const":
    # Get the value of the 'dtype' attr
    dtype = node_def.attr["dtype"].type
    if dtype == dtypes.string or dtype == dtypes.int32:
      return True

  if node_def.op in ["DynamicStitch", "ParallelDynamicStitch"]:
    dtype = node_def.attr["T"].type
    if dtype == dtypes.int32:
      # DynamicStitch on GPU only works for int32 values.
      return True

  if node_def.op in ["Cast"]:
    dtype = node_def.attr["SrcT"].type
    if dtype == dtypes.int32:
      # Cast on GPU does not work for int32 values.
      return True
  return False


################################################################################
#
# device functions for use in with g.device(...)
#
################################################################################


def _node_name(n):
  if n.startswith("^"):
    return n[1:]
  else:
    return n.split(":")[0]


def _extract_graph_summary(graph_def):
  """Extracts useful information from the graph and returns them."""
  name_to_input_name = {}  # Keyed by the dest node name.
  name_to_node = {}  # Keyed by node name.

  # Keeps track of node sequences. It is important to still output the
  # operations in the original order.
name_to_seq_num = {} # Keyed by node name. seq = 0 for node in graph_def.node: n = _node_name(node.name) name_to_node[n] = node name_to_input_name[n] = [_node_name(x) for x in node.input] name_to_seq_num[n] = seq seq += 1 return name_to_input_name, name_to_node, name_to_seq_num def _assert_nodes_are_present(name_to_node, nodes): """Assert that nodes are present in the graph.""" for d in nodes: assert d in name_to_node, "%s is not in graph" % d def _bfs_for_reachable_nodes(target_nodes, name_to_input_name): """Breadth first search for reachable nodes from target nodes.""" nodes_to_keep = set() # Breadth first search to find all the nodes that we should keep. next_to_visit = target_nodes[:] while next_to_visit: node = next_to_visit[0] del next_to_visit[0] if node in nodes_to_keep: # Already visited this node. continue nodes_to_keep.add(node) if node in name_to_input_name: next_to_visit += name_to_input_name[node] return nodes_to_keep @deprecation.deprecated( date=None, instructions="Use `tf.compat.v1.graph_util.extract_sub_graph`") @tf_export(v1=["graph_util.extract_sub_graph"]) def extract_sub_graph(graph_def, dest_nodes): """Extract the subgraph that can reach any of the nodes in 'dest_nodes'. Args: graph_def: A graph_pb2.GraphDef proto. dest_nodes: A list of strings specifying the destination node names. Returns: The GraphDef of the sub-graph. Raises: TypeError: If 'graph_def' is not a graph_pb2.GraphDef proto. """ if not isinstance(graph_def, graph_pb2.GraphDef): raise TypeError("graph_def must be a graph_pb2.GraphDef proto.") if isinstance(dest_nodes, six.string_types): raise TypeError("dest_nodes must be a list.") name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary( graph_def) _assert_nodes_are_present(name_to_node, dest_nodes) nodes_to_keep = _bfs_for_reachable_nodes(dest_nodes, name_to_input_name) nodes_to_keep_list = sorted( list(nodes_to_keep), key=lambda n: name_to_seq_num[n]) # Now construct the output GraphDef out = graph_pb2.GraphDef() for n in nodes_to_keep_list: out.node.extend([copy.deepcopy(name_to_node[n])]) out.library.CopyFrom(graph_def.library) out.versions.CopyFrom(graph_def.versions) return out @deprecation.deprecated( date=None, instructions="Use `tf.compat.v1.graph_util.tensor_shape_from_node_def_name`" ) @tf_export(v1=["graph_util.tensor_shape_from_node_def_name"]) def tensor_shape_from_node_def_name(graph, input_name): """Convenience function to get a shape from a NodeDef's input string.""" # To get a tensor, the name must be in the form <input>:<port>, for example # 'Mul:0'. The GraphDef input strings don't always have the port specified # though, so if there isn't a colon we need to add a default ':0' to the end. if ":" not in input_name: canonical_name = input_name + ":0" else: canonical_name = input_name tensor = graph.get_tensor_by_name(canonical_name) shape = tensor.get_shape() return shape @deprecation.deprecated( date=None, instructions="Use `tf.compat.v1.graph_util.convert_variables_to_constants`") @tf_export(v1=["graph_util.convert_variables_to_constants"]) def convert_variables_to_constants(sess, input_graph_def, output_node_names, variable_names_whitelist=None, variable_names_blacklist=None): """Replaces all the variables in a graph with constants of the same values. If you have a trained graph containing Variable ops, it can be convenient to convert them all to Const ops holding the same values. 
This makes it possible to describe the network fully with a single GraphDef file, and allows the removal of a lot of ops related to loading and saving the variables. Args: sess: Active TensorFlow session containing the variables. input_graph_def: GraphDef object holding the network. output_node_names: List of name strings for the result nodes of the graph. variable_names_whitelist: The set of variable names to convert (by default, all variables are converted). variable_names_blacklist: The set of variable names to omit converting to constants. Returns: GraphDef containing a simplified version of the original. """ def get_input_name(node): """Gets the name of the first input. Errors if suffix is not :0.""" details = node.input[0].split(":") if len(details) == 1 or int(details[1]) == 0: return details[0] # While it is valid for input tensors to have a suffix that is not :0, this # method is used to find the associated ops, not tensors, and therefore it # is not valid. raise ValueError("Tensor name '{0}' is invalid.".format(node.input[0])) def create_const_op(node_name, dtype, data, data_shape=None): """Creates a Const op.""" output_node = node_def_pb2.NodeDef() output_node.op = "Const" output_node.name = node_name output_node.attr["dtype"].CopyFrom(dtype) output_node.attr["value"].CopyFrom( attr_value_pb2.AttrValue( tensor=tensor_util.make_tensor_proto( data, dtype=dtype.type, shape=data_shape))) return output_node # This graph only includes the nodes needed to evaluate the output nodes, and # removes unneeded nodes like those involved in saving and assignment. inference_graph = extract_sub_graph(input_graph_def, output_node_names) # Identify the ops in the graph. map_name_to_node = { node.name: node for node in inference_graph.node } # Get list of variables. variable_names = [] variable_dict_names = [] resource_identity_types = {} for node in inference_graph.node: if node.op in ["Variable", "VariableV2", "VarHandleOp"]: variable_name = node.name if ((variable_names_whitelist is not None and variable_name not in variable_names_whitelist) or (variable_names_blacklist is not None and variable_name in variable_names_blacklist)): continue variable_dict_names.append(variable_name) if node.op == "VarHandleOp": variable_names.append(variable_name + "/Read/ReadVariableOp:0") else: variable_names.append(variable_name + ":0") elif node.op in ["ReadVariableOp", "ResourceGather"]: # There can be one or more Identity ops in between the ReadVariableOp and # VarHandleOp. Store the Identity ops with the associated dtypes. source_op_name = get_input_name(node) while map_name_to_node[source_op_name].op == "Identity": resource_identity_types[source_op_name] = node.attr["dtype"] source_op_name = get_input_name(map_name_to_node[source_op_name]) if map_name_to_node[source_op_name].op != "VarHandleOp": raise ValueError("Cannot find the variable that is an input " "to the ReadVariableOp.") # Gets map of variables and the associated data. if variable_names: returned_variables = sess.run(variable_names) else: returned_variables = [] variables_data_map = dict(zip(variable_dict_names, returned_variables)) logging.info("Froze %d variables.", len(returned_variables)) # Reconstruct the graph with constants in place of variables. 
output_graph_def = graph_pb2.GraphDef() how_many_converted = 0 for input_node in inference_graph.node: output_node = node_def_pb2.NodeDef() if input_node.name in variables_data_map: data = variables_data_map[input_node.name] output_node = create_const_op(input_node.name, input_node.attr["dtype"], data, data.shape) how_many_converted += 1 elif input_node.name in resource_identity_types: # Converts the Identities of type RESOURCE_DT to the appropriate type # based on the input they are referencing. output_node.CopyFrom(input_node) output_node.attr["T"].CopyFrom(resource_identity_types[input_node.name]) elif input_node.op == "ReadVariableOp": # The first branch converts all VarHandleOps of ResourceVariables to # constants, so we need to convert the associated ReadVariableOps to # Identity ops. output_node.op = "Identity" output_node.name = input_node.name output_node.input.extend([input_node.input[0]]) output_node.attr["T"].CopyFrom(input_node.attr["dtype"]) if "_class" in input_node.attr: output_node.attr["_class"].CopyFrom(input_node.attr["_class"]) elif input_node.op == "ResourceGather": # The first branch converts all VarHandleOps of ResourceGather to # constants, so we need to convert the associated ResourceGather to Gather # ops with a Const axis feeding into it. if input_node.attr["batch_dims"].i != 0: raise ValueError("batch_dims != 0 is not supported by freeze_graph.") axis_data = input_node.attr["batch_dims"].i axis_node_name = input_node.name + "/axis" axis_dtype = input_node.attr["Tindices"] output_axis_node = create_const_op(axis_node_name, axis_dtype, axis_data) output_graph_def.node.extend([output_axis_node]) output_node.op = "GatherV2" output_node.name = input_node.name output_node.input.extend( [input_node.input[0], input_node.input[1], axis_node_name]) output_node.attr["Tparams"].CopyFrom(input_node.attr["dtype"]) output_node.attr["Tindices"].CopyFrom(input_node.attr["Tindices"]) output_node.attr["Taxis"].CopyFrom(axis_dtype) if "_class" in input_node.attr: output_node.attr["_class"].CopyFrom(input_node.attr["_class"]) else: output_node.CopyFrom(input_node) output_graph_def.node.extend([output_node]) output_graph_def.library.CopyFrom(inference_graph.library) logging.info("Converted %d variables to const ops.", how_many_converted) return output_graph_def @deprecation.deprecated( date=None, instructions="Use `tf.compat.v1.graph_util.remove_training_nodes`") @tf_export(v1=["graph_util.remove_training_nodes"]) def remove_training_nodes(input_graph, protected_nodes=None): """Prunes out nodes that aren't needed for inference. There are nodes like Identity and CheckNumerics that are only useful during training, and can be removed in graphs that will be used for nothing but inference. Here we identify and remove them, returning an equivalent graph. To be specific, CheckNumerics nodes are always removed, and Identity nodes that aren't involved in control edges are spliced out so that their input and outputs are directly connected. Args: input_graph: Model to analyze and prune. protected_nodes: An optional list of names of nodes to be kept unconditionally. This is for example useful to preserve Identity output nodes. Returns: A list of nodes with the unnecessary ones removed. 
""" if not protected_nodes: protected_nodes = [] types_to_remove = {"CheckNumerics": True} input_nodes = input_graph.node names_to_remove = {} for node in input_nodes: if node.op in types_to_remove and node.name not in protected_nodes: names_to_remove[node.name] = True nodes_after_removal = [] for node in input_nodes: if node.name in names_to_remove: continue new_node = node_def_pb2.NodeDef() new_node.CopyFrom(node) input_before_removal = node.input del new_node.input[:] for full_input_name in input_before_removal: input_name = re.sub(r"^\^", "", full_input_name) if input_name in names_to_remove: continue new_node.input.append(full_input_name) nodes_after_removal.append(new_node) types_to_splice = {"Identity": True} control_input_names = set() node_names_with_control_input = set() for node in nodes_after_removal: for node_input in node.input: if "^" in node_input: control_input_names.add(node_input.replace("^", "")) node_names_with_control_input.add(node.name) names_to_splice = {} for node in nodes_after_removal: if node.op in types_to_splice and node.name not in protected_nodes: # We don't want to remove nodes that have control edge inputs, because # they might be involved in subtle dependency issues that removing them # will jeopardize. if node.name not in node_names_with_control_input: names_to_splice[node.name] = node.input[0] # We also don't want to remove nodes which are used as control edge inputs. names_to_splice = {name: value for name, value in names_to_splice.items() if name not in control_input_names} nodes_after_splicing = [] for node in nodes_after_removal: if node.name in names_to_splice: continue new_node = node_def_pb2.NodeDef() new_node.CopyFrom(node) input_before_removal = node.input del new_node.input[:] for full_input_name in input_before_removal: input_name = re.sub(r"^\^", "", full_input_name) while input_name in names_to_splice: full_input_name = names_to_splice[input_name] input_name = re.sub(r"^\^", "", full_input_name) new_node.input.append(full_input_name) nodes_after_splicing.append(new_node) output_graph = graph_pb2.GraphDef() output_graph.node.extend(nodes_after_splicing) return output_graph
the-stack_106_30781
from CharLvl_NMT import * from encoder import cnn_encoder, rnn_encoder from decoder import AttnDecoderRNN class CharLevel_autoencoder(nn.Module): def __init__(self, criterion, num_symbols, use_cuda): ''' overview of autoencoder forward: 1. Input batch is embedded 2. CNN+Pool encoder is called on input 3. BiGRU encoder is called on activations of previous encoder 4. Attention GRU decoder takes an embedded symbol at current t - Decoder embedding embeds symbol at current t 6. Batch cross entropy is calculated and returned ''' super(CharLevel_autoencoder, self).__init__() self.char_embedding_dim = 128 self.pooling_stride = 5 self.seq_len = 300 self.num_symbols = num_symbols self.use_cuda = use_cuda self.filter_widths = list(range(1, 9)) # due to cuda limitations, every filter width has 50 less filters self.num_filters_per_width = [150, 150, 200, 200, 250, 250, 250, 250] self.encoder_embedding = nn.Embedding(num_symbols, self.char_embedding_dim) self.cnn_encoder = cnn_encoder( filter_widths = self.filter_widths, num_filters_per_width = self.num_filters_per_width, char_embedding_dim = self.char_embedding_dim, use_cuda = use_cuda) self.decoder_hidden_size = int(np.sum(np.array(self.num_filters_per_width)) ) self.rnn_encoder = rnn_encoder( hidden_size = self.decoder_hidden_size ) # decoder embedding dim dictated by output dim of encoder self.decoder_embedding = nn.Embedding(num_symbols, self.decoder_hidden_size) self.attention_decoder = AttnDecoderRNN( num_symbols = num_symbols, hidden_size = self.decoder_hidden_size, output_size = self.seq_len//self.pooling_stride) self.criterion = criterion def encode(self, data, seq_len): encoder_embedded = self.encoder_embedding(data).unsqueeze(1).transpose(2,3) encoded = self.cnn_encoder.forward(encoder_embedded, self.seq_len) encoded = encoded.squeeze(2) encoder_hidden = self.rnn_encoder.initHidden() encoder_outputs = Variable(torch.zeros(64, seq_len//self.pooling_stride, 2*self.decoder_hidden_size)) if self.use_cuda: encoder_outputs = encoder_outputs.cuda() encoder_hidden = encoder_hidden.cuda() for symbol_ind in range(self.seq_len//self.pooling_stride):#self.rnn_emits_len): output, encoder_hidden = self.rnn_encoder.forward( encoded[:,:,symbol_ind], encoder_hidden) encoder_outputs[:, symbol_ind,:] = output[0] return encoder_outputs, encoder_hidden def decode(self, target_data, decoder_hidden, encoder_outputs, i): use_teacher_forcing = True if random.random() < 0.7 else False if type(i) != bool: # given batch index, then eval mode, no teacher forcing use_teacher_forcing = False output = [] # SOS token = 32 after encoding it input_embedded = Variable(torch.LongTensor([32]).repeat(64), requires_grad = False) if self.use_cuda: input_embedded = input_embedded.cuda() input_embedded = self.decoder_embedding( input_embedded ) for symbol_index in range(self.seq_len): # # current symbol, current hidden state, outputs from encoder decoder_output, decoder_hidden, attn_weights = self.attention_decoder.forward( input_embedded, decoder_hidden, encoder_outputs) output.append(decoder_output) if use_teacher_forcing: input_symbol = Variable(target_data[:, symbol_index], requires_grad = False) if self.use_cuda: input_symbol = input_symbol.cuda() else: values, input_symbol = decoder_output.max(1) input_embedded = self.decoder_embedding( input_symbol ) # at current batch: conglomerate all true and predicted symbols # into one vector then return the batch cross entropy # first mask out padding at the end of every sentence actual_sentence_mask = torch.ne(target_data, 31).byte() 
threeD_mask = actual_sentence_mask.unsqueeze(2).repeat(1, 1, 125)#.transpose() predicted = torch.stack(output, dim=1) # if validation loader is called, dump predictions if type(i) != bool: values, indices = predicted.max(2) print( indices.data.shape) pickle.dump(indices.data.numpy(), open( "./data/%s_predicted.p" %(i), "wb" ), protocol=4 ) if self.use_cuda: target_data, actual_sentence_mask, threeD_mask = target_data.cuda(), actual_sentence_mask.cuda(), threeD_mask.cuda() # calculate cross entropy on non-padding symbols masked_target = torch.masked_select(target_data, actual_sentence_mask) predicted = predicted.masked_select(Variable(threeD_mask), ) predicted = predicted.view(-1,125) loss = self.criterion( predicted, Variable(masked_target, ) ) return loss
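
# Rough construction sketch (the vocabulary size and loss are placeholder
# choices; the real training loop and data pipeline live outside this file):
#
#   criterion = nn.CrossEntropyLoss()
#   model = CharLevel_autoencoder(criterion, num_symbols=128, use_cuda=False)
#   # 'batch' is expected to be a (64, 300) LongTensor of symbol indices,
#   # since encode() allocates its output buffer for a batch of 64.
#   encoder_outputs, encoder_hidden = model.encode(batch, seq_len=300)
#   # decode() then takes the target batch, an initial decoder hidden state and
#   # encoder_outputs, and returns the masked cross-entropy loss.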
the-stack_106_30783
import torch import os import math from torch.autograd import Variable import numpy as np from PIL import Image from neural_best_buddies.util import util import numpy as np def color_map(i): colors = [ [255,0,0], [0,255,0], [0,0,255], [128,128,0], [0,128,128] ] if i < 5: return colors[i] else: return np.random.randint(0,256,3) def draw_square(image, center, color, radius = 2): d = 2*radius + 1 image_p = np.pad(image, ((radius,radius),(radius,radius),(0,0)),'constant') center_p = [center[0]+radius, center[1]+radius] image_p[center_p[0]-radius, (center_p[1]-radius):(center_p[1]-radius+d), :] = np.tile(color,[d,1]) image_p[(center_p[0]-radius):(center_p[0]-radius+d), center_p[1]-radius, :] = np.tile(color,[d,1]) image_p[center_p[0]+radius, (center_p[1]-radius):(center_p[1]-radius+d), :] = np.tile(color,[d,1]) image_p[(center_p[0]-radius):(center_p[0]-radius+d), center_p[1]+radius, :] = np.tile(color,[d,1]) return image_p[radius:image_p.shape[0]-radius, radius:image_p.shape[1]-radius, :] def draw_dots(image, center, color): image[center[0], center[1], :] = color return image def draw_circle(image, center, color, radius = 4, border_color = [255,255,255]): image_p = np.pad(image, ((radius,radius),(radius,radius),(0,0)),'constant') center_p = [center[0]+radius, center[1]+radius] edge_d = math.floor((2*radius + 1)/6) image_p[center_p[0]-radius, (center_p[1]-edge_d):(center_p[1]+edge_d+1), :] = np.tile(border_color,[3,1]) image_p[center_p[0]+radius, (center_p[1]-edge_d):(center_p[1]+edge_d+1), :] = np.tile(border_color,[3,1]) for i in range(1,radius): image_p[center_p[0]+i, center_p[1]-radius+i-1, :] = border_color image_p[center_p[0]-i, center_p[1]-radius+i-1, :] = border_color image_p[center_p[0]+i, (center_p[1]-radius+i):(center_p[1]+radius-i+1), :] = np.tile(color, [2*(radius-i)+1,1]) image_p[center_p[0]-i, (center_p[1]-radius+i):(center_p[1]+radius-i+1), :] = np.tile(color, [2*(radius-i)+1,1]) image_p[center_p[0]+i, center_p[1]+radius+1-i, :] = border_color image_p[center_p[0]-i, center_p[1]+radius+1-i, :] = border_color image_p[center_p[0], center_p[1]-radius, :] = border_color image_p[center_p[0], (center_p[1]-radius+1):(center_p[1]+radius), :] = np.tile(color, [2*(radius-1)+1,1]) image_p[center_p[0], center_p[1]+radius, :] = border_color return image_p[radius:image_p.shape[0]-radius, radius:image_p.shape[1]-radius, :] def draw_points(self, A, points, radius, name, save_dir, unicolor = False, level = 0): A_marked = util.tensor2im(A) for i in range(len(points)): center = [points[i][0], points[i][1]] if unicolor == True: color = color_map(0) else: color = color_map(i) if level > 2 : A_marked = draw_square(A_marked, center, color, radius=radius) elif level == 2 or level == 1: A_marked = draw_circle(A_marked, center, color) else: A_marked = draw_dots(A_marked, center, color) util.save_image(A_marked, os.path.join(save_dir, name + '.png')) def draw_correspondence(A, B, correspondence, radius, save_dir, level = 0, name=''): A_marked = util.tensor2im(A) B_marked = util.tensor2im(B) for i in range(len(correspondence[0])): color = color_map(i) center_1 = [correspondence[0][i][0], correspondence[0][i][1]] center_2 = [correspondence[1][i][0], correspondence[1][i][1]] if level < 3 : A_marked = draw_circle(A_marked, center_1, color) B_marked = draw_circle(B_marked, center_2, color) else: A_marked = draw_square(A_marked, [center_1[0]+radius, center_1[1]+radius], color, radius=radius) B_marked = draw_square(B_marked, [center_2[0]+radius, center_2[1]+radius], color, radius=radius) util.save_image(A_marked, 
os.path.join(save_dir, 'A_level_'+str(level)+name+'.png')) util.save_image(B_marked, os.path.join(save_dir, 'B_level_'+str(level)+name+'.png'))
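
# Minimal usage sketch for the correspondence drawing helper (shapes and values
# are illustrative only; A and B are image tensors as used elsewhere in this repo):
#
#   correspondence = [[[30, 40], [100, 120]],   # (y, x) points in image A
#                     [[32, 44], [101, 118]]]   # matching (y, x) points in image B
#   draw_correspondence(A, B, correspondence, radius=2,
#                       save_dir='./results', level=3, name='_top2')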
the-stack_106_30786
# -*- coding: utf-8 -*- import pytest from rastaecommerce.skeleton import fib __author__ = "Jens Krause" __copyright__ = "Jens Krause" __license__ = "mit" def test_fib(): assert fib(1) == 1 assert fib(2) == 1 assert fib(7) == 13 with pytest.raises(AssertionError): fib(-10)
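
# The same expectations written as a table-driven test; the cases reuse only the
# values already asserted above, so nothing new is assumed about fib().
@pytest.mark.parametrize("n, expected", [(1, 1), (2, 1), (7, 13)])
def test_fib_parametrized(n, expected):
    assert fib(n) == expected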
the-stack_106_30787
# Copyright (c) 2017 StackHPC Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import os import subprocess import sys import six import yaml LOG = logging.getLogger(__name__) def yum_install(packages): """Install a list of packages via Yum.""" cmd = ["sudo", "yum", "-y", "install"] cmd += packages try: run_command(cmd) except subprocess.CalledProcessError as e: print("Failed to install packages %s via Yum: returncode %d" % (", ".join(packages), e.returncode)) sys.exit(e.returncode) def galaxy_install(role_file, roles_path, force=False): """Install Ansible roles via Ansible Galaxy.""" cmd = ["ansible-galaxy", "install"] cmd += ["--roles-path", roles_path] cmd += ["--role-file", role_file] if force: cmd += ["--force"] try: run_command(cmd) except subprocess.CalledProcessError as e: LOG.error("Failed to install Ansible roles from %s via Ansible " "Galaxy: returncode %d", role_file, e.returncode) sys.exit(e.returncode) def read_file(path, mode="r"): """Read the content of a file.""" with open(path, mode) as f: return f.read() def read_yaml_file(path): """Read and decode a YAML file.""" try: content = read_file(path) except IOError as e: print("Failed to open config dump file %s: %s" % (path, repr(e))) sys.exit(1) try: return yaml.load(content) except yaml.YAMLError as e: print("Failed to decode config dump YAML file %s: %s" % (path, repr(e))) sys.exit(1) def is_readable_dir(path): """Check whether a path references a readable directory.""" if not os.path.exists(path): return {"result": False, "message": "Path does not exist"} if not os.path.isdir(path): return {"result": False, "message": "Path is not a directory"} if not os.access(path, os.R_OK): return {"result": False, "message": "Directory is not readable"} return {"result": True} def is_readable_file(path): """Check whether a path references a readable file.""" if not os.path.exists(path): return {"result": False, "message": "Path does not exist"} if not os.path.isfile(path): return {"result": False, "message": "Path is not a file"} if not os.access(path, os.R_OK): return {"result": False, "message": "File is not readable"} return {"result": True} def run_command(cmd, quiet=False, check_output=False, **kwargs): """Run a command, checking the output. :param quiet: Redirect output to /dev/null :param check_output: Whether to return the output of the command :returns: The output of the command if check_output is true """ if isinstance(cmd, six.string_types): cmd_string = cmd else: cmd_string = " ".join(cmd) LOG.debug("Running command: %s", cmd_string) if quiet: with open("/dev/null", "w") as devnull: kwargs["stdout"] = devnull kwargs["stderr"] = devnull subprocess.check_call(cmd, **kwargs) elif check_output: return subprocess.check_output(cmd, **kwargs) else: subprocess.check_call(cmd, **kwargs)
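
# Small usage sketch (the commands and path shown are arbitrary examples; any
# argv-style list accepted by subprocess works):
#
#   run_command(["true"], quiet=True)                        # discard output
#   head = run_command(["git", "rev-parse", "HEAD"], check_output=True)
#   config = read_yaml_file("config.yml")                    # placeholder path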
the-stack_106_30789
import os

from flask import (Flask, flash, redirect, render_template, request, send_file,
                   send_from_directory, url_for)
from werkzeug.utils import secure_filename

from run import run
from utils import allwed_file

UPLOAD_FOLDER = "uploads"

app = Flask(__name__)
app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER


@app.route("/", methods=["GET", "POST"])
def upload_file():
    if request.method == "POST":
        if ("file1" not in request.files) | ("file2" not in request.files):
            flash("No file provided")
            return redirect(request.url)
        file1 = request.files["file1"]
        file2 = request.files["file2"]
        if (file1.filename == "") | (file2.filename == ""):
            flash("No file provided")
            return redirect(request.url)
        if file1 and allwed_file(file1.filename):
            filename1 = secure_filename(file1.filename)
            file1.save(os.path.join(app.config["UPLOAD_FOLDER"], filename1))
        if file2 and allwed_file(file2.filename):
            filename2 = secure_filename(file2.filename)
            file2.save(os.path.join(app.config["UPLOAD_FOLDER"], filename2))

        class args:
            file_path = os.path.join(app.config["UPLOAD_FOLDER"], filename1)
            out_path = "tmp/output.png"
            data_dir = "tmp"
            body_path = "data/body.png"
            background_path = os.path.join(app.config["UPLOAD_FOLDER"], filename2)

        run(args)

        return send_file(args.out_path)

    if request.method == "GET":
        return render_template("home.html")


if __name__ == "__main__":
    app.run(debug=True, port=8888)
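
# Example request against a locally running instance (the image file names are
# placeholders; the form field names file1/file2 match what upload_file() reads):
#
#   curl -F "file1=@person.png" -F "file2=@background.png" \
#        http://localhost:8888/ --output result.png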
the-stack_106_30790
# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import array import cPickle as pickle import os import sys import unittest from contextlib import closing from gzip import GzipFile from tempfile import mkdtemp from shutil import rmtree from time import sleep, time from swift.common import ring, utils class TestRingData(unittest.TestCase): def setUp(self): self.testdir = os.path.join(os.path.dirname(__file__), 'ring_data') rmtree(self.testdir, ignore_errors=1) os.mkdir(self.testdir) def tearDown(self): rmtree(self.testdir, ignore_errors=1) def assert_ring_data_equal(self, rd_expected, rd_got): self.assertEquals(rd_expected._replica2part2dev_id, rd_got._replica2part2dev_id) self.assertEquals(rd_expected.devs, rd_got.devs) self.assertEquals(rd_expected._part_shift, rd_got._part_shift) def test_attrs(self): r2p2d = [[0, 1, 0, 1], [0, 1, 0, 1]] d = [{'id': 0, 'zone': 0, 'region': 0, 'ip': '10.1.1.0', 'port': 7000}, {'id': 1, 'zone': 1, 'region': 1, 'ip': '10.1.1.1', 'port': 7000}] s = 30 rd = ring.RingData(r2p2d, d, s) self.assertEquals(rd._replica2part2dev_id, r2p2d) self.assertEquals(rd.devs, d) self.assertEquals(rd._part_shift, s) def test_can_load_pickled_ring_data(self): rd = ring.RingData( [[0, 1, 0, 1], [0, 1, 0, 1]], [{'id': 0, 'zone': 0, 'ip': '10.1.1.0', 'port': 7000}, {'id': 1, 'zone': 1, 'ip': '10.1.1.1', 'port': 7000}], 30) ring_fname = os.path.join(self.testdir, 'foo.ring.gz') for p in xrange(pickle.HIGHEST_PROTOCOL): with closing(GzipFile(ring_fname, 'wb')) as f: pickle.dump(rd, f, protocol=p) ring_data = ring.RingData.load(ring_fname) self.assert_ring_data_equal(rd, ring_data) def test_roundtrip_serialization(self): ring_fname = os.path.join(self.testdir, 'foo.ring.gz') rd = ring.RingData( [array.array('H', [0, 1, 0, 1]), array.array('H', [0, 1, 0, 1])], [{'id': 0, 'zone': 0}, {'id': 1, 'zone': 1}], 30) rd.save(ring_fname) rd2 = ring.RingData.load(ring_fname) self.assert_ring_data_equal(rd, rd2) def test_deterministic_serialization(self): """ Two identical rings should produce identical .gz files on disk. Only true on Python 2.7 or greater. """ if sys.version_info[0] == 2 and sys.version_info[1] < 7: return os.mkdir(os.path.join(self.testdir, '1')) os.mkdir(os.path.join(self.testdir, '2')) # These have to have the same filename (not full path, # obviously) since the filename gets encoded in the gzip data. 
ring_fname1 = os.path.join(self.testdir, '1', 'the.ring.gz') ring_fname2 = os.path.join(self.testdir, '2', 'the.ring.gz') rd = ring.RingData( [array.array('H', [0, 1, 0, 1]), array.array('H', [0, 1, 0, 1])], [{'id': 0, 'zone': 0}, {'id': 1, 'zone': 1}], 30) rd.save(ring_fname1) rd.save(ring_fname2) with open(ring_fname1) as ring1: with open(ring_fname2) as ring2: self.assertEqual(ring1.read(), ring2.read()) class TestRing(unittest.TestCase): def setUp(self): utils.HASH_PATH_SUFFIX = 'endcap' utils.HASH_PATH_PREFIX = '' self.testdir = mkdtemp() self.testgz = os.path.join(self.testdir, 'whatever.ring.gz') self.intended_replica2part2dev_id = [ array.array('H', [0, 1, 0, 1]), array.array('H', [0, 1, 0, 1]), array.array('H', [3, 4, 3, 4])] self.intended_devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1.0, 'ip': '10.1.1.1', 'port': 6000, 'replication_ip': '10.1.0.1', 'replication_port': 6066}, {'id': 1, 'region': 0, 'zone': 0, 'weight': 1.0, 'ip': '10.1.1.1', 'port': 6000, 'replication_ip': '10.1.0.2', 'replication_port': 6066}, None, {'id': 3, 'region': 0, 'zone': 2, 'weight': 1.0, 'ip': '10.1.2.1', 'port': 6000, 'replication_ip': '10.2.0.1', 'replication_port': 6066}, {'id': 4, 'region': 0, 'zone': 2, 'weight': 1.0, 'ip': '10.1.2.2', 'port': 6000, 'replication_ip': '10.2.0.1', 'replication_port': 6066}] self.intended_part_shift = 30 self.intended_reload_time = 15 ring.RingData( self.intended_replica2part2dev_id, self.intended_devs, self.intended_part_shift).save(self.testgz) self.ring = ring.Ring( self.testdir, reload_time=self.intended_reload_time, ring_name='whatever') def tearDown(self): rmtree(self.testdir, ignore_errors=1) def test_creation(self): self.assertEquals(self.ring._replica2part2dev_id, self.intended_replica2part2dev_id) self.assertEquals(self.ring._part_shift, self.intended_part_shift) self.assertEquals(self.ring.devs, self.intended_devs) self.assertEquals(self.ring.reload_time, self.intended_reload_time) self.assertEquals(self.ring.serialized_path, self.testgz) # test invalid endcap _orig_hash_path_suffix = utils.HASH_PATH_SUFFIX _orig_hash_path_prefix = utils.HASH_PATH_PREFIX _orig_swift_conf_file = utils.SWIFT_CONF_FILE try: utils.HASH_PATH_SUFFIX = '' utils.HASH_PATH_PREFIX = '' utils.SWIFT_CONF_FILE = '' self.assertRaises(SystemExit, ring.Ring, self.testdir, 'whatever') finally: utils.HASH_PATH_SUFFIX = _orig_hash_path_suffix utils.HASH_PATH_PREFIX = _orig_hash_path_prefix utils.SWIFT_CONF_FILE = _orig_swift_conf_file def test_has_changed(self): self.assertEquals(self.ring.has_changed(), False) os.utime(self.testgz, (time() + 60, time() + 60)) self.assertEquals(self.ring.has_changed(), True) def test_reload(self): os.utime(self.testgz, (time() - 300, time() - 300)) self.ring = ring.Ring(self.testdir, reload_time=0.001, ring_name='whatever') orig_mtime = self.ring._mtime self.assertEquals(len(self.ring.devs), 5) self.intended_devs.append( {'id': 3, 'region': 0, 'zone': 3, 'weight': 1.0, 'ip': '10.1.1.1', 'port': 9876}) ring.RingData( self.intended_replica2part2dev_id, self.intended_devs, self.intended_part_shift).save(self.testgz) sleep(0.1) self.ring.get_nodes('a') self.assertEquals(len(self.ring.devs), 6) self.assertNotEquals(self.ring._mtime, orig_mtime) os.utime(self.testgz, (time() - 300, time() - 300)) self.ring = ring.Ring(self.testdir, reload_time=0.001, ring_name='whatever') orig_mtime = self.ring._mtime self.assertEquals(len(self.ring.devs), 6) self.intended_devs.append( {'id': 5, 'region': 0, 'zone': 4, 'weight': 1.0, 'ip': '10.5.5.5', 'port': 9876}) 
ring.RingData( self.intended_replica2part2dev_id, self.intended_devs, self.intended_part_shift).save(self.testgz) sleep(0.1) self.ring.get_part_nodes(0) self.assertEquals(len(self.ring.devs), 7) self.assertNotEquals(self.ring._mtime, orig_mtime) os.utime(self.testgz, (time() - 300, time() - 300)) self.ring = ring.Ring(self.testdir, reload_time=0.001, ring_name='whatever') orig_mtime = self.ring._mtime part, nodes = self.ring.get_nodes('a') self.assertEquals(len(self.ring.devs), 7) self.intended_devs.append( {'id': 6, 'region': 0, 'zone': 5, 'weight': 1.0, 'ip': '10.6.6.6', 'port': 6000}) ring.RingData( self.intended_replica2part2dev_id, self.intended_devs, self.intended_part_shift).save(self.testgz) sleep(0.1) self.ring.get_more_nodes(part).next() self.assertEquals(len(self.ring.devs), 8) self.assertNotEquals(self.ring._mtime, orig_mtime) os.utime(self.testgz, (time() - 300, time() - 300)) self.ring = ring.Ring(self.testdir, reload_time=0.001, ring_name='whatever') orig_mtime = self.ring._mtime self.assertEquals(len(self.ring.devs), 8) self.intended_devs.append( {'id': 5, 'region': 0, 'zone': 4, 'weight': 1.0, 'ip': '10.5.5.5', 'port': 6000}) ring.RingData( self.intended_replica2part2dev_id, self.intended_devs, self.intended_part_shift).save(self.testgz) sleep(0.1) self.assertEquals(len(self.ring.devs), 9) self.assertNotEquals(self.ring._mtime, orig_mtime) def test_reload_without_replication(self): replication_less_devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1.0, 'ip': '10.1.1.1', 'port': 6000}, {'id': 1, 'region': 0, 'zone': 0, 'weight': 1.0, 'ip': '10.1.1.1', 'port': 6000}, None, {'id': 3, 'region': 0, 'zone': 2, 'weight': 1.0, 'ip': '10.1.2.1', 'port': 6000}, {'id': 4, 'region': 0, 'zone': 2, 'weight': 1.0, 'ip': '10.1.2.2', 'port': 6000}] intended_devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1.0, 'ip': '10.1.1.1', 'port': 6000, 'replication_ip': '10.1.1.1', 'replication_port': 6000}, {'id': 1, 'region': 0, 'zone': 0, 'weight': 1.0, 'ip': '10.1.1.1', 'port': 6000, 'replication_ip': '10.1.1.1', 'replication_port': 6000}, None, {'id': 3, 'region': 0, 'zone': 2, 'weight': 1.0, 'ip': '10.1.2.1', 'port': 6000, 'replication_ip': '10.1.2.1', 'replication_port': 6000}, {'id': 4, 'region': 0, 'zone': 2, 'weight': 1.0, 'ip': '10.1.2.2', 'port': 6000, 'replication_ip': '10.1.2.2', 'replication_port': 6000}] testgz = os.path.join(self.testdir, 'without_replication.ring.gz') ring.RingData( self.intended_replica2part2dev_id, replication_less_devs, self.intended_part_shift).save(testgz) self.ring = ring.Ring( self.testdir, reload_time=self.intended_reload_time, ring_name='without_replication') self.assertEquals(self.ring.devs, intended_devs) def test_reload_old_style_pickled_ring(self): devs = [{'id': 0, 'zone': 0, 'weight': 1.0, 'ip': '10.1.1.1', 'port': 6000}, {'id': 1, 'zone': 0, 'weight': 1.0, 'ip': '10.1.1.1', 'port': 6000}, None, {'id': 3, 'zone': 2, 'weight': 1.0, 'ip': '10.1.2.1', 'port': 6000}, {'id': 4, 'zone': 2, 'weight': 1.0, 'ip': '10.1.2.2', 'port': 6000}] intended_devs = [{'id': 0, 'region': 1, 'zone': 0, 'weight': 1.0, 'ip': '10.1.1.1', 'port': 6000, 'replication_ip': '10.1.1.1', 'replication_port': 6000}, {'id': 1, 'region': 1, 'zone': 0, 'weight': 1.0, 'ip': '10.1.1.1', 'port': 6000, 'replication_ip': '10.1.1.1', 'replication_port': 6000}, None, {'id': 3, 'region': 1, 'zone': 2, 'weight': 1.0, 'ip': '10.1.2.1', 'port': 6000, 'replication_ip': '10.1.2.1', 'replication_port': 6000}, {'id': 4, 'region': 1, 'zone': 2, 'weight': 1.0, 'ip': '10.1.2.2', 'port': 6000, 
'replication_ip': '10.1.2.2', 'replication_port': 6000}] # simulate an old-style pickled ring testgz = os.path.join(self.testdir, 'without_replication_or_region.ring.gz') ring_data = ring.RingData(self.intended_replica2part2dev_id, devs, self.intended_part_shift) # an old-style pickled ring won't have region data for dev in ring_data.devs: if dev: del dev["region"] gz_file = GzipFile(testgz, 'wb') pickle.dump(ring_data, gz_file, protocol=2) gz_file.close() self.ring = ring.Ring( self.testdir, reload_time=self.intended_reload_time, ring_name='without_replication_or_region') self.assertEquals(self.ring.devs, intended_devs) def test_get_part(self): part1 = self.ring.get_part('a') nodes1 = self.ring.get_part_nodes(part1) part2, nodes2 = self.ring.get_nodes('a') self.assertEquals(part1, part2) self.assertEquals(nodes1, nodes2) def test_get_part_nodes(self): part, nodes = self.ring.get_nodes('a') self.assertEquals(nodes, self.ring.get_part_nodes(part)) def test_get_nodes(self): # Yes, these tests are deliberately very fragile. We want to make sure # that if someones changes the results the ring produces, they know it. self.assertRaises(TypeError, self.ring.get_nodes) part, nodes = self.ring.get_nodes('a') self.assertEquals(part, 0) self.assertEquals(nodes, [self.intended_devs[0], self.intended_devs[3]]) part, nodes = self.ring.get_nodes('a1') self.assertEquals(part, 0) self.assertEquals(nodes, [self.intended_devs[0], self.intended_devs[3]]) part, nodes = self.ring.get_nodes('a4') self.assertEquals(part, 1) self.assertEquals(nodes, [self.intended_devs[1], self.intended_devs[4]]) part, nodes = self.ring.get_nodes('aa') self.assertEquals(part, 1) self.assertEquals(nodes, [self.intended_devs[1], self.intended_devs[4]]) part, nodes = self.ring.get_nodes('a', 'c1') self.assertEquals(part, 0) self.assertEquals(nodes, [self.intended_devs[0], self.intended_devs[3]]) part, nodes = self.ring.get_nodes('a', 'c0') self.assertEquals(part, 3) self.assertEquals(nodes, [self.intended_devs[1], self.intended_devs[4]]) part, nodes = self.ring.get_nodes('a', 'c3') self.assertEquals(part, 2) self.assertEquals(nodes, [self.intended_devs[0], self.intended_devs[3]]) part, nodes = self.ring.get_nodes('a', 'c2') self.assertEquals(part, 2) self.assertEquals(nodes, [self.intended_devs[0], self.intended_devs[3]]) part, nodes = self.ring.get_nodes('a', 'c', 'o1') self.assertEquals(part, 1) self.assertEquals(nodes, [self.intended_devs[1], self.intended_devs[4]]) part, nodes = self.ring.get_nodes('a', 'c', 'o5') self.assertEquals(part, 0) self.assertEquals(nodes, [self.intended_devs[0], self.intended_devs[3]]) part, nodes = self.ring.get_nodes('a', 'c', 'o0') self.assertEquals(part, 0) self.assertEquals(nodes, [self.intended_devs[0], self.intended_devs[3]]) part, nodes = self.ring.get_nodes('a', 'c', 'o2') self.assertEquals(part, 2) self.assertEquals(nodes, [self.intended_devs[0], self.intended_devs[3]]) def add_dev_to_ring(self, new_dev): self.ring.devs.append(new_dev) self.ring._rebuild_tier_data() def test_get_more_nodes(self): # Yes, these tests are deliberately very fragile. We want to make sure # that if someone changes the results the ring produces, they know it. 
exp_part = 6 exp_devs = [48, 93, 96] exp_zones = set([5, 8, 9]) exp_handoffs = [11, 47, 25, 76, 69, 23, 99, 59, 106, 64, 43, 34, 88, 3, 30, 83, 16, 27, 103, 39, 60, 0, 8, 72, 56, 19, 91, 13, 84, 38, 66, 52, 78, 107, 50, 57, 31, 32, 77, 24, 42, 100, 71, 26, 9, 20, 35, 5, 14, 94, 28, 41, 18, 102, 101, 61, 95, 21, 81, 1, 105, 58, 74, 90, 86, 46, 4, 68, 40, 80, 54, 75, 45, 79, 44, 49, 62, 29, 7, 15, 70, 87, 65, 12, 82, 17, 104, 97, 55, 22, 6, 89, 2, 67, 37, 63, 53, 92, 33, 85, 73, 51, 98, 36, 10] exp_first_handoffs = [1, 37, 48, 68, 84, 75, 11, 101, 14, 73, 100, 75, 29, 19, 18, 101, 15, 99, 95, 24, 46, 82, 73, 62, 24, 89, 9, 22, 107, 74, 54, 63, 40, 106, 99, 83, 64, 73, 73, 106, 106, 80, 6, 25, 20, 33, 6, 79, 59, 42, 62, 24, 14, 107, 28, 0, 85, 5, 4, 12, 58, 11, 92, 18, 36, 56, 86, 1, 21, 33, 80, 97, 4, 81, 79, 76, 89, 50, 75, 27, 7, 96, 47, 55, 81, 104, 12, 5, 18, 106, 27, 93, 39, 92, 42, 30, 20, 88, 58, 105, 65, 29, 17, 52, 11, 106, 7, 24, 21, 91, 62, 52, 50, 31, 77, 102, 19, 11, 8, 58, 53, 20, 26, 8, 18, 82, 48, 68, 82, 89, 101, 50, 3, 52, 46, 11, 2, 30, 79, 66, 4, 61, 3, 56, 45, 102, 73, 84, 36, 19, 34, 84, 49, 40, 103, 66, 31, 33, 93, 33, 4, 52, 26, 58, 30, 47, 100, 57, 40, 79, 33, 107, 24, 20, 44, 4, 7, 59, 83, 101, 1, 56, 20, 61, 33, 16, 5, 74, 98, 4, 80, 15, 104, 52, 73, 18, 67, 75, 98, 73, 79, 68, 75, 27, 91, 36, 100, 52, 95, 37, 46, 70, 14, 47, 3, 70, 23, 40, 105, 62, 86, 48, 22, 54, 4, 72, 81, 13, 0, 18, 98, 101, 36, 29, 24, 39, 79, 97, 105, 28, 107, 47, 52, 101, 20, 22, 29, 65, 27, 7, 33, 64, 101, 60, 19, 55] rb = ring.RingBuilder(8, 3, 1) next_dev_id = 0 for zone in xrange(1, 10): for server in xrange(1, 5): for device in xrange(1, 4): rb.add_dev({'id': next_dev_id, 'ip': '1.2.%d.%d' % (zone, server), 'port': 1234, 'zone': zone, 'region': 0, 'weight': 1.0}) next_dev_id += 1 rb.rebalance(seed=1) rb.get_ring().save(self.testgz) r = ring.Ring(self.testdir, ring_name='whatever') part, devs = r.get_nodes('a', 'c', 'o') primary_zones = set([d['zone'] for d in devs]) self.assertEquals(part, exp_part) self.assertEquals([d['id'] for d in devs], exp_devs) self.assertEquals(primary_zones, exp_zones) devs = list(r.get_more_nodes(part)) self.assertEquals([d['id'] for d in devs], exp_handoffs) # The first 6 replicas plus the 3 primary nodes should cover all 9 # zones in this test seen_zones = set(primary_zones) seen_zones.update([d['zone'] for d in devs[:6]]) self.assertEquals(seen_zones, set(range(1, 10))) # The first handoff nodes for each partition in the ring devs = [] for part in xrange(r.partition_count): devs.append(r.get_more_nodes(part).next()['id']) self.assertEquals(devs, exp_first_handoffs) # Add a new device we can handoff to. zone = 5 server = 0 rb.add_dev({'id': next_dev_id, 'ip': '1.2.%d.%d' % (zone, server), 'port': 1234, 'zone': zone, 'region': 0, 'weight': 1.0}) next_dev_id += 1 rb.rebalance(seed=1) rb.get_ring().save(self.testgz) r = ring.Ring(self.testdir, ring_name='whatever') # We would change expectations here, but in this test no handoffs # changed at all. 
part, devs = r.get_nodes('a', 'c', 'o') primary_zones = set([d['zone'] for d in devs]) self.assertEquals(part, exp_part) self.assertEquals([d['id'] for d in devs], exp_devs) self.assertEquals(primary_zones, exp_zones) devs = list(r.get_more_nodes(part)) dev_ids = [d['id'] for d in devs] self.assertEquals(len(dev_ids), len(exp_handoffs)) for index, dev in enumerate(dev_ids): self.assertEquals( dev, exp_handoffs[index], 'handoff differs at position %d\n%s\n%s' % ( index, dev_ids[index:], exp_handoffs[index:])) # The handoffs still cover all the non-primary zones first seen_zones = set(primary_zones) seen_zones.update([d['zone'] for d in devs[:6]]) self.assertEquals(seen_zones, set(range(1, 10))) devs = [] for part in xrange(r.partition_count): devs.append(r.get_more_nodes(part).next()['id']) for part in xrange(r.partition_count): self.assertEquals( devs[part], exp_first_handoffs[part], 'handoff for partitition %d is now device id %d' % ( part, devs[part])) # Remove a device. rb.remove_dev(0) rb.rebalance(seed=1) rb.get_ring().save(self.testgz) r = ring.Ring(self.testdir, ring_name='whatever') # Change expectations # The long string of handoff nodes for the partition were the same for # the first 20, which is pretty good. exp_handoffs[20:] = [60, 108, 8, 72, 56, 19, 91, 13, 84, 38, 66, 52, 1, 78, 107, 50, 57, 31, 32, 77, 24, 42, 100, 71, 26, 9, 20, 35, 5, 14, 94, 28, 41, 18, 102, 101, 61, 95, 21, 81, 105, 58, 74, 90, 86, 46, 4, 68, 40, 80, 54, 75, 45, 79, 44, 49, 62, 29, 7, 15, 70, 87, 65, 12, 82, 17, 104, 97, 55, 22, 6, 89, 2, 67, 37, 63, 53, 92, 33, 85, 73, 51, 98, 36, 10] # Just a few of the first handoffs changed exp_first_handoffs[3] = 68 exp_first_handoffs[55] = 104 exp_first_handoffs[116] = 6 exp_first_handoffs[181] = 15 exp_first_handoffs[228] = 38 # Test part, devs = r.get_nodes('a', 'c', 'o') primary_zones = set([d['zone'] for d in devs]) self.assertEquals(part, exp_part) self.assertEquals([d['id'] for d in devs], exp_devs) self.assertEquals(primary_zones, exp_zones) devs = list(r.get_more_nodes(part)) dev_ids = [d['id'] for d in devs] self.assertEquals(len(dev_ids), len(exp_handoffs)) for index, dev in enumerate(dev_ids): self.assertEquals( dev, exp_handoffs[index], 'handoff differs at position %d\n%s\n%s' % ( index, dev_ids[index:], exp_handoffs[index:])) seen_zones = set(primary_zones) seen_zones.update([d['zone'] for d in devs[:6]]) self.assertEquals(seen_zones, set(range(1, 10))) devs = [] for part in xrange(r.partition_count): devs.append(r.get_more_nodes(part).next()['id']) for part in xrange(r.partition_count): self.assertEquals( devs[part], exp_first_handoffs[part], 'handoff for partitition %d is now device id %d' % ( part, devs[part])) # Add a partial replica rb.set_replicas(3.5) rb.rebalance(seed=1) rb.get_ring().save(self.testgz) r = ring.Ring(self.testdir, ring_name='whatever') # Change expectations # We have another replica now exp_devs.append(47) exp_zones.add(4) # Caused some major changes in the sequence of handoffs for our test # partition, but at least the first stayed the same. 
exp_handoffs[1:] = [81, 25, 69, 23, 99, 59, 76, 3, 106, 64, 43, 13, 34, 88, 30, 16, 27, 103, 39, 74, 60, 108, 8, 56, 19, 91, 52, 84, 38, 66, 1, 78, 45, 107, 50, 57, 83, 31, 46, 32, 77, 24, 42, 63, 100, 72, 71, 7, 26, 9, 20, 35, 5, 87, 14, 94, 62, 28, 41, 90, 18, 82, 102, 22, 101, 61, 85, 95, 21, 98, 67, 105, 58, 86, 4, 79, 68, 40, 80, 54, 75, 44, 49, 6, 29, 15, 70, 65, 12, 17, 104, 97, 55, 89, 2, 37, 53, 92, 33, 73, 51, 36, 10] # Lots of first handoffs changed, but 30 of 256 is still just 11.72%. exp_first_handoffs[1] = 6 exp_first_handoffs[4] = 104 exp_first_handoffs[11] = 106 exp_first_handoffs[17] = 13 exp_first_handoffs[21] = 77 exp_first_handoffs[22] = 95 exp_first_handoffs[27] = 46 exp_first_handoffs[29] = 65 exp_first_handoffs[30] = 3 exp_first_handoffs[31] = 20 exp_first_handoffs[51] = 50 exp_first_handoffs[53] = 8 exp_first_handoffs[54] = 2 exp_first_handoffs[72] = 107 exp_first_handoffs[79] = 72 exp_first_handoffs[85] = 71 exp_first_handoffs[88] = 66 exp_first_handoffs[92] = 29 exp_first_handoffs[93] = 46 exp_first_handoffs[96] = 38 exp_first_handoffs[101] = 57 exp_first_handoffs[103] = 87 exp_first_handoffs[104] = 28 exp_first_handoffs[107] = 1 exp_first_handoffs[109] = 69 exp_first_handoffs[110] = 50 exp_first_handoffs[111] = 76 exp_first_handoffs[115] = 47 exp_first_handoffs[117] = 48 exp_first_handoffs[119] = 7 # Test part, devs = r.get_nodes('a', 'c', 'o') primary_zones = set([d['zone'] for d in devs]) self.assertEquals(part, exp_part) self.assertEquals([d['id'] for d in devs], exp_devs) self.assertEquals(primary_zones, exp_zones) devs = list(r.get_more_nodes(part)) dev_ids = [d['id'] for d in devs] self.assertEquals(len(dev_ids), len(exp_handoffs)) for index, dev in enumerate(dev_ids): self.assertEquals( dev, exp_handoffs[index], 'handoff differs at position %d\n%s\n%s' % ( index, dev_ids[index:], exp_handoffs[index:])) seen_zones = set(primary_zones) seen_zones.update([d['zone'] for d in devs[:6]]) self.assertEquals(seen_zones, set(range(1, 10))) devs = [] for part in xrange(r.partition_count): devs.append(r.get_more_nodes(part).next()['id']) for part in xrange(r.partition_count): self.assertEquals( devs[part], exp_first_handoffs[part], 'handoff for partitition %d is now device id %d' % ( part, devs[part])) # One last test of a partial replica partition exp_part2 = 136 exp_devs2 = [52, 76, 97] exp_zones2 = set([9, 5, 7]) exp_handoffs2 = [2, 67, 37, 92, 33, 23, 107, 63, 44, 103, 108, 85, 73, 10, 89, 80, 4, 17, 49, 32, 12, 41, 58, 20, 25, 61, 94, 47, 69, 56, 101, 28, 83, 8, 96, 53, 51, 42, 98, 35, 36, 84, 43, 104, 31, 65, 1, 40, 9, 74, 95, 45, 5, 71, 86, 78, 30, 93, 48, 91, 15, 88, 39, 18, 57, 72, 70, 27, 54, 16, 24, 21, 14, 11, 77, 62, 50, 6, 105, 26, 55, 29, 60, 34, 13, 87, 59, 38, 99, 75, 106, 3, 82, 66, 79, 7, 46, 64, 81, 22, 68, 19, 102, 90, 100] part2, devs2 = r.get_nodes('a', 'c', 'o2') primary_zones2 = set([d['zone'] for d in devs2]) self.assertEquals(part2, exp_part2) self.assertEquals([d['id'] for d in devs2], exp_devs2) self.assertEquals(primary_zones2, exp_zones2) devs2 = list(r.get_more_nodes(part2)) dev_ids2 = [d['id'] for d in devs2] self.assertEquals(len(dev_ids2), len(exp_handoffs2)) for index, dev in enumerate(dev_ids2): self.assertEquals( dev, exp_handoffs2[index], 'handoff differs at position %d\n%s\n%s' % ( index, dev_ids2[index:], exp_handoffs2[index:])) seen_zones = set(primary_zones2) seen_zones.update([d['zone'] for d in devs2[:6]]) self.assertEquals(seen_zones, set(range(1, 10))) # Test distribution across regions rb.set_replicas(3) for region in 
xrange(1, 5): rb.add_dev({'id': next_dev_id, 'ip': '1.%d.1.%d' % (region, server), 'port': 1234, 'zone': 1, 'region': region, 'weight': 1.0}) next_dev_id += 1 rb.pretend_min_part_hours_passed() rb.rebalance(seed=1) rb.pretend_min_part_hours_passed() rb.rebalance(seed=1) rb.get_ring().save(self.testgz) r = ring.Ring(self.testdir, ring_name='whatever') # There's 5 regions now, so the primary nodes + first 2 handoffs # should span all 5 regions part, devs = r.get_nodes('a1', 'c1', 'o1') primary_regions = set([d['region'] for d in devs]) primary_zones = set([(d['region'], d['zone']) for d in devs]) more_devs = list(r.get_more_nodes(part)) seen_regions = set(primary_regions) seen_regions.update([d['region'] for d in more_devs[:2]]) self.assertEquals(seen_regions, set(range(0, 5))) # There are 13 zones now, so the first 13 nodes should all have # distinct zones (that's r0z0, r0z1, ..., r0z8, r1z1, r2z1, r3z1, and # r4z1). seen_zones = set(primary_zones) seen_zones.update([(d['region'], d['zone']) for d in more_devs[:10]]) self.assertEquals(13, len(seen_zones)) # Here's a brittle canary-in-the-coalmine test to make sure the region # handoff computation didn't change accidentally exp_handoffs = [111, 112, 74, 54, 93, 31, 2, 43, 100, 22, 71, 92, 35, 9, 50, 41, 76, 80, 84, 88, 17, 96, 6, 102, 37, 29, 105, 5, 47, 20, 13, 108, 66, 81, 53, 65, 25, 58, 32, 94, 101, 1, 10, 44, 73, 75, 21, 97, 28, 106, 30, 16, 39, 77, 42, 72, 34, 99, 14, 61, 90, 4, 40, 3, 45, 62, 7, 15, 87, 12, 83, 89, 33, 98, 49, 107, 56, 86, 48, 57, 24, 11, 23, 26, 46, 64, 69, 38, 36, 79, 63, 104, 51, 70, 82, 67, 68, 8, 95, 91, 55, 59, 85] dev_ids = [d['id'] for d in more_devs] self.assertEquals(len(dev_ids), len(exp_handoffs)) for index, dev_id in enumerate(dev_ids): self.assertEquals( dev_id, exp_handoffs[index], 'handoff differs at position %d\n%s\n%s' % ( index, dev_ids[index:], exp_handoffs[index:])) if __name__ == '__main__': unittest.main()
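# A minimal, self-contained sketch (not Swift's actual Ring implementation) of the
# partition lookup that get_nodes() exercises in the tests above: hash the object
# path, keep the top bits of the hash as the partition number, then map each
# replica row of replica2part2dev_id to a device. The tiny ring below (3 devices,
# 4 partitions, 2 replicas) is an assumption made only for illustration; real
# rings are built with ring.RingBuilder as in the tests.
from hashlib import md5
from struct import unpack_from

sketch_devs = [{'id': 0, 'zone': 0}, {'id': 1, 'zone': 1}, {'id': 2, 'zone': 2}]
sketch_replica2part2dev_id = [[0, 1, 2, 0],   # replica 0: device id per partition
                              [1, 2, 0, 1]]   # replica 1: device id per partition
sketch_part_shift = 30                        # 32-bit hash >> 30 -> 4 partitions


def sketch_get_nodes(account, container=None, obj=None):
    # Build the storage path, hash it, and shift down to a partition index.
    path = '/' + '/'.join(p for p in (account, container, obj) if p)
    part = unpack_from('>I', md5(path.encode()).digest())[0] >> sketch_part_shift
    # One primary device per replica row.
    return part, [sketch_devs[row[part]] for row in sketch_replica2part2dev_id]

# Usage: sketch_get_nodes('a', 'c', 'o') -> (partition, [primary devices])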
the-stack_106_30793
import os
import time

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torchvision import datasets, models, transforms


def init_face_classifier(args, model_name, num_classes=2, resume_from=None):
    input_size = 100
    model = None

    if model_name == 'vgg11':
        model = models.vgg11(num_classes=num_classes)
        num_ftrs = model.classifier[6].in_features
        model.classifier[6] = nn.Linear(num_ftrs, num_classes)

    elif model_name == 'vgg11_bn':
        model = models.vgg11_bn(num_classes=num_classes)
        num_ftrs = model.classifier[6].in_features
        model.classifier[6] = nn.Linear(num_ftrs, num_classes)

    elif model_name == 'vgg13':
        model = models.vgg13(num_classes=num_classes)
        num_ftrs = model.classifier[6].in_features
        model.classifier[6] = nn.Linear(num_ftrs, num_classes)

    elif model_name == 'vgg13_bn':
        model = models.vgg13_bn(num_classes=num_classes)
        num_ftrs = model.classifier[6].in_features
        model.classifier[6] = nn.Linear(num_ftrs, num_classes)

    elif model_name == 'vgg16':
        model = models.vgg16(num_classes=num_classes)
        num_ftrs = model.classifier[6].in_features
        model.classifier[6] = nn.Linear(num_ftrs, num_classes)

    elif model_name == 'vgg16_bn':
        model = models.vgg16_bn(num_classes=num_classes)
        num_ftrs = model.classifier[6].in_features
        model.classifier[6] = nn.Linear(num_ftrs, num_classes)

    elif model_name == 'vgg19':
        model = models.vgg19(num_classes=num_classes)
        num_ftrs = model.classifier[6].in_features
        model.classifier[6] = nn.Linear(num_ftrs, num_classes)

    elif model_name == 'vgg19_bn':
        model = models.vgg19_bn(num_classes=num_classes)
        num_ftrs = model.classifier[6].in_features
        model.classifier[6] = nn.Linear(num_ftrs, num_classes)

    elif model_name == "resnet18":
        """ Resnet18 """
        model = models.resnet18(num_classes=num_classes)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)
        if args.dropout > 0:
            model.fc = nn.Sequential(nn.Dropout(args.dropout), nn.Linear(num_ftrs, num_classes))

    elif model_name == "resnet34":
        """ Resnet34 """
        model = models.resnet34(num_classes=num_classes)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)
        if args.dropout > 0:
            model.fc = nn.Sequential(nn.Dropout(args.dropout), nn.Linear(num_ftrs, num_classes))

    elif model_name == "resnet50":
        """ Resnet50 """
        model = models.resnet50(num_classes=num_classes)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)
        if args.dropout > 0:
            model.fc = nn.Sequential(nn.Dropout(args.dropout), nn.Linear(num_ftrs, num_classes))

    elif model_name == "wide_resnet":
        model = models.wide_resnet50_2(num_classes=num_classes)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)
        if args.dropout > 0:
            model.fc = nn.Sequential(nn.Dropout(args.dropout), nn.Linear(num_ftrs, num_classes))

    else:
        raise Exception("Invalid model name!")

    if resume_from is not None:
        print("Loading weights from %s" % resume_from)
        model.load_state_dict(torch.load(resume_from, map_location=args.device))

    return model, input_size


def make_optimizer_and_scheduler(args, model):
    # Get all the parameters
    params_to_update = model.parameters()
    print(model)

    # Optimizer
    arg_optim = args.optimizer
    if arg_optim == 'adam':
        optimizer = optim.Adam(params_to_update, lr=args.lr)
    elif arg_optim == 'amsgrad':
        optimizer = optim.Adam(params_to_update, lr=args.lr, amsgrad=True)
    elif arg_optim == 'adagrad':
        optimizer = optim.Adagrad(params_to_update, lr=args.lr)
    elif arg_optim == 'sgdo':
        optimizer = optim.SGD(params_to_update, lr=args.lr)
    elif arg_optim == 'sgd':
        optimizer = optim.SGD(params_to_update, lr=args.lr, momentum=0.9)
    elif arg_optim == 'adamwd':
        optimizer = optim.Adam(params_to_update, lr=args.lr, weight_decay=1e-4)
    elif arg_optim == 'sgdwd':
        optimizer = optim.SGD(params_to_update, lr=args.lr, momentum=0.9, weight_decay=1e-4)
    else:
        raise Exception("Invalid optimizer!")

    # Scheduler
    if args.scheduler == 'exp':
        scheduler = lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.95)
    elif args.scheduler == 'ms':
        scheduler = lr_scheduler.MultiStepLR(optimizer=optimizer, milestones=[10, 20], gamma=0.1)
    elif args.scheduler == 'none':
        scheduler = None
    else:
        raise Exception("Invalid scheduler!")

    return optimizer, scheduler


def get_loss():
    # Create an instance of the loss function
    criterion = nn.CrossEntropyLoss()
    return criterion


# class FaceClassifierModel(nn.Module):
#
#     def __init__(self, device, n, model_name='vgg16', num_classes=2):
#         super().__init__()
#         self.device = device
#         self.n = n
#         model = models.vgg16(num_classes=num_classes)
#         num_ftrs = model.classifier[6].in_features
#         model.classifier[6] = nn.Linear(num_ftrs, num_classes)
#         self.model = model
#         # input size = 100, 100
#
#     def forward(self, data):
#         pass
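# A short, hypothetical usage sketch of the helpers above. The argparse.Namespace
# values (dropout, device, optimizer, lr, scheduler) are assumptions standing in
# for the real command-line arguments this project parses elsewhere.
if __name__ == '__main__':
    import argparse

    demo_args = argparse.Namespace(dropout=0.5, device='cpu', optimizer='adam',
                                   lr=1e-3, scheduler='ms')
    demo_model, demo_input_size = init_face_classifier(demo_args, 'resnet18')
    demo_optimizer, demo_scheduler = make_optimizer_and_scheduler(demo_args, demo_model)
    demo_criterion = get_loss()

    # One dummy training step on a fake batch of 100x100 face crops.
    faces = torch.randn(4, 3, demo_input_size, demo_input_size)
    labels = torch.randint(0, 2, (4,))
    loss = demo_criterion(demo_model(faces), labels)
    loss.backward()
    demo_optimizer.step()
    if demo_scheduler is not None:
        demo_scheduler.step()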
the-stack_106_30795
import os
import sys
import shutil
from UserDict import UserDict

# Uncomment to use local kodi addondev repo
# sys.path.append(os.path.join(os.getcwd(), 'kodi-addondev', 'src'))

import requests
import zipfile
from git import Repo
from lxml import etree

import xbmc
from kodi_addon_dev import repo, tesseract
from kodi_addon_dev.support import Addon, setup_paths

ADDON_TEMP_DIR = '/tmp/kodi-addondev.WM_Esa'
SCRAPERS_REPO_DIR = os.path.join(ADDON_TEMP_DIR, 'script.module.openscrapers')
SCRAPER_ADDON_URLS = {
    'the_crew': 'https://raw.githubusercontent.com/thecrewwh/zips/master/_zip/script.module.thecrew',
    'shadow': 'https://raw.githubusercontent.com/thecrewwh/zips/master/_zip/plugin.video.shadow',
    'tempest': 'https://raw.githubusercontent.com/Tempest0580/tempest/master/zips/plugin.video.tempest'
}


def bootstrap_scrapers():
    # Set up temp dir
    if not os.path.exists(ADDON_TEMP_DIR):
        os.mkdir(ADDON_TEMP_DIR)
    setup_paths(False)

    # Clone latest openscrapers into temp dir
    if not os.path.exists(SCRAPERS_REPO_DIR):
        Repo.clone_from('git://github.com/a4k-openproject/script.module.openscrapers', SCRAPERS_REPO_DIR)

    # Add openscrapers to python path
    sys.path.append(SCRAPERS_REPO_DIR)
    sys.path.append(os.path.join(SCRAPERS_REPO_DIR, 'lib'))

    # Add alternative scrapers to python path
    folders = ['en', 'en_OnlyDebrid', 'en_Torrent']
    for addon_name, addon_url in SCRAPER_ADDON_URLS.items():
        new_addon_folders = import_addon(addon_name, addon_url)
        folders += new_addon_folders

    return init_openscrapers(folders)


def init_openscrapers(folders):
    addon = Addon.from_path(SCRAPERS_REPO_DIR)

    # Monkey patch settings object to enable all providers
    class AllProviderDict(UserDict, object):
        def get(self, key, default=''):
            return "true" if key.startswith("provider.") else super(AllProviderDict, self).get(key, default)

    addon.settings = AllProviderDict()

    # Create kodi mock module
    cached = repo.LocalRepo([], [], addon)
    deps = cached.load_dependencies(addon)
    xbmc.session = tesseract.Tesseract(addon, deps, cached)

    # Initialize openscrapers with passed scraper folders
    from lib import openscrapers
    return openscrapers.sources(folders)


def import_addon(name, url):
    # TODO: Check if addon already exists
    # Get url of latest zip file from addon.xml
    metadata = etree.XML(requests.get(url + '/addon.xml').text.encode())
    zip_url = '%s/%s-%s.zip' % (url, metadata.get('id'), metadata.get('version'))
    addon_zip = requests.get(zip_url)

    # Make empty temp dir for addon
    temp_dir = os.path.join(ADDON_TEMP_DIR, 'tmp')
    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir)
    os.mkdir(temp_dir)

    # Unzip addon into temp dir
    zip_file_path = os.path.join(temp_dir, 'addon.zip')
    with open(zip_file_path, 'wb') as addon_zip_file:
        addon_zip_file.write(addon_zip.content)
    with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
        zip_ref.extractall(temp_dir)
    os.remove(zip_file_path)

    # Find scraper directories
    scraper_dirs = []
    for addon_subdir in os.walk(temp_dir):
        folder_name = addon_subdir[0].split('/')[-1]
        if folder_name == 'en' or folder_name.startswith('en_'):
            scraper_dirs.append(addon_subdir[0])
    if len(scraper_dirs) == 0:
        for addon_subdir in os.walk(temp_dir):
            folder_name = addon_subdir[0].split('/')[-1]
            if folder_name == 'sources':
                scraper_dirs.append(addon_subdir[0])

    # Move scraper directories to openscrapers and remove temp folder
    for index, scraper_dir in enumerate(scraper_dirs):
        sources_dir = os.path.join(SCRAPERS_REPO_DIR, 'lib', 'openscrapers', 'sources_openscrapers')
        new_source_dir = os.path.join(sources_dir, '%s_%i' % (name, index))
        if os.path.exists(new_source_dir):
            shutil.rmtree(new_source_dir)
        shutil.move(scraper_dir, new_source_dir)
        change_addon_imports(new_source_dir)
    shutil.rmtree(temp_dir)

    # Return list of scraper folders created by function
    return ['%s_%i' % (name, index) for index in range(len(scraper_dirs))]


# Change all imports from resources.lib to openscrapers
def change_addon_imports(scraper_dir):
    for parent_dir, _dirs, files in os.walk(scraper_dir):
        for file_name in files:
            scraper_file_dir = os.path.join(parent_dir, file_name)
            with open(scraper_file_dir) as scraper_file:
                new_file_contents = scraper_file.read().replace('from resources.lib', 'from openscrapers')
            with open(scraper_file_dir, 'w') as scraper_file:
                scraper_file.write(new_file_contents)
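# A hypothetical usage sketch. bootstrap_scrapers() clones the openscrapers repo,
# merges the extra scraper add-ons listed in SCRAPER_ADDON_URLS into it, patches
# the add-on settings so every provider reports as enabled, and returns the
# initialized openscrapers ``sources`` object. Which query methods that object
# exposes depends on the openscrapers version cloned at runtime, so only its
# construction is shown here.
if __name__ == '__main__':
    all_sources = bootstrap_scrapers()
    print('Loaded scraper wrapper: %r' % all_sources)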
the-stack_106_30796
r""" Reed-Solomon codes and Generalized Reed-Solomon codes Given `n` different evaluation points `\alpha_1, \dots, \alpha_n` from some finite field `F`, the corresponding Reed-Solomon code (RS code) of dimension `k` is the set: .. MATH:: \{ f(\alpha_1), \ldots, f(\alpha_n) \mid f \in F[x], \deg f < k \} More generally, given also `n` "column multipliers" `\beta_1, \dots, \beta_n`, the corresponding Generalized Reed-Solomon code (GRS code) of dimension `k` is the set: .. MATH:: \{ (\beta_1 f(\alpha_1), \ldots, \beta_n f(\alpha_n) \mid f \in F[x], \deg f < k \} Here is a list of all content related to GRS codes: - :class:`GeneralizedReedSolomonCode`, the class for GRS codes - :class:`GRSEvaluationVectorEncoder`, an encoder with a vectorial message space - :class:`GRSEvaluationPolynomialEncoder`, an encoder with a polynomial message space - :class:`GRSBerlekampWelchDecoder`, a decoder which corrects errors using Berlekamp-Welch algorithm - :class:`GRSGaoDecoder`, a decoder which corrects errors using Gao algorithm - :class:`GRSErrorErasureDecoder`, a decoder which corrects both errors and erasures - :class:`GRSKeyEquationSyndromeDecoder`, a decoder which corrects errors using the key equation on syndrome polynomials """ #***************************************************************************** # Copyright (C) 2015 David Lucas <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # http://www.gnu.org/licenses/ #***************************************************************************** from __future__ import absolute_import from six.moves import range from sage.matrix.constructor import matrix, diagonal_matrix from sage.rings.finite_rings.finite_field_constructor import GF from sage.categories.cartesian_product import cartesian_product from sage.modules.free_module_element import vector from sage.modules.free_module import VectorSpace from sage.rings.integer import Integer from sage.misc.cachefunc import cached_method from copy import copy from .linear_code import AbstractLinearCode from .encoder import Encoder from .decoder import Decoder, DecodingError from sage.misc.misc_c import prod from sage.functions.other import binomial, floor, sqrt from sage.calculus.var import var from sage.misc.functional import symbolic_sum from sage.rings.integer_ring import ZZ class GeneralizedReedSolomonCode(AbstractLinearCode): r""" Representation of a (Generalized) Reed-Solomon code. 
INPUT: - ``evaluation_points`` -- a list of distinct elements of some finite field `F` - ``dimension`` -- the dimension of the resulting code - ``column_multipliers`` -- (default: ``None``) list of non-zero elements of `F`; all column multipliers are set to 1 if default value is kept EXAMPLES: A classical Reed-Solomon code can be constructed by taking all non-zero elements of the field as evaluation points, and specifying no column multipliers:: sage: F = GF(7) sage: evalpts = [F(i) for i in range(1,7)] sage: C = codes.GeneralizedReedSolomonCode(evalpts,3) sage: C [6, 3, 4] Reed-Solomon Code over GF(7) More generally, the following is a Reed-Solomon code where the evaluation points are a subset of the field and includes zero:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: C [40, 12, 29] Reed-Solomon Code over GF(59) It is also possible to specify the column multipliers:: sage: F = GF(59) sage: n, k = 40, 12 sage: colmults = F.list()[1:n+1] sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k, colmults) sage: C [40, 12, 29] Generalized Reed-Solomon Code over GF(59) """ _registered_encoders = {} _registered_decoders = {} def __init__(self, evaluation_points, dimension, column_multipliers=None): r""" TESTS: If the evaluation points are not from a finite field, it raises an error:: sage: C = codes.GeneralizedReedSolomonCode([1,2,3], 1) Traceback (most recent call last): ... ValueError: Evaluation points must be in a finite field (and Integer Ring is not one) If the evaluation points are not from the same finite field, it raises an error:: sage: F2, F3 = GF(2) , GF(3) sage: C = codes.GeneralizedReedSolomonCode([F2.zero(),F2.one(),F3(2)], 1) Traceback (most recent call last): ... ValueError: Failed converting all evaluation points to the same field (unable to find a common ring for all elements) If the column multipliers cannot be converted into the finite are not from a finite field, or cannot be not in the same finite field as the evaluation points, it raises an error:: sage: F = GF(59) sage: F2 = GF(61) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k, [.3]*n ) Traceback (most recent call last): ... ValueError: Failed converting all evaluation points and column multipliers to the same field (unable to find a common ring for all elements) sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k, F2.list()[1:n+1]) Traceback (most recent call last): ... ValueError: Failed converting all evaluation points and column multipliers to the same field (unable to find a common ring for all elements) The number of column multipliers is checked as well:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k, F.list()[1:n]) Traceback (most recent call last): ... ValueError: There must be the same number of evaluation points as column multipliers It is not allowed to have 0 as a column multiplier:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k, F.list()[:n]) Traceback (most recent call last): ... ValueError: All column multipliers must be non-zero And all the evaluation points must be different. Note that they should be different after converting into the same field:: sage: F = GF(5) sage: C = codes.GeneralizedReedSolomonCode([ F(0), 1, 2, 3, 5 ], 3) Traceback (most recent call last): ... 
ValueError: All evaluation points must be different The dimension is not allowed to exceed the length:: sage: F = GF(59) sage: n, k = 40, 100 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) Traceback (most recent call last): ... ValueError: The dimension must be a positive integer at most the length of the code. """ if column_multipliers: if len(evaluation_points) != len(column_multipliers): raise ValueError("There must be the same number of evaluation points as column multipliers"); try: common_points = vector(list(evaluation_points) + list(column_multipliers)) F = common_points.base_ring() self._evaluation_points = common_points[:len(evaluation_points)] self._column_multipliers = common_points[len(evaluation_points):] except (TypeError, ValueError) as e: raise ValueError("Failed converting all evaluation points and column multipliers to the same field (%s)" % e.message) else: try: self._evaluation_points = vector(evaluation_points) F = self._evaluation_points.base_ring() self._column_multipliers = vector(F, [F.one()] * len(self._evaluation_points)) except (TypeError, ValueError) as e: raise ValueError("Failed converting all evaluation points to the same field (%s)" % e.message) if not F.is_finite() or not F.is_field(): raise ValueError("Evaluation points must be in a finite field (and %s is not one)" % F) super(GeneralizedReedSolomonCode, self).__init__(F, len(self._evaluation_points), "EvaluationVector", "Gao") if dimension not in ZZ or dimension > self._length or dimension < 1: raise ValueError("The dimension must be a positive integer at most the length of the code.") self._dimension = dimension if F.zero() in self._column_multipliers: raise ValueError("All column multipliers must be non-zero") if len(self._evaluation_points) != len(set(self._evaluation_points)): raise ValueError("All evaluation points must be different") def __eq__(self, other): r""" Test equality between Generalized Reed-Solomon codes. EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C1 = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: C2 = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: C1.__eq__(C2) True """ return isinstance(other, GeneralizedReedSolomonCode) \ and self.base_field() == other.base_field() \ and self.length() == other.length() \ and self.dimension() == other.dimension() \ and self.evaluation_points() == other.evaluation_points() \ and self.column_multipliers() == other.column_multipliers() def _repr_(self): r""" Return a string representation of ``self``. EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: C [40, 12, 29] Reed-Solomon Code over GF(59) sage: colmults = F.list()[1:n+1] sage: C2 = codes.GeneralizedReedSolomonCode(F.list()[:n], k, colmults) sage: C2 [40, 12, 29] Generalized Reed-Solomon Code over GF(59) """ return "[%s, %s, %s] %sReed-Solomon Code over GF(%s)"\ % (self.length(), self.dimension(), self.minimum_distance(), "Generalized " if self.is_generalized() else "", self.base_field().cardinality()) def _latex_(self): r""" Return a latex representation of ``self``. 
EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: latex(C) [40, 12, 29] \textnormal{ Reed-Solomon Code over } \Bold{F}_{59} sage: colmults = F.list()[1:n+1] sage: C2 = codes.GeneralizedReedSolomonCode(F.list()[:n], k, colmults) sage: latex(C2) [40, 12, 29] \textnormal{ Generalized Reed-Solomon Code over } \Bold{F}_{59} """ return "[%s, %s, %s] \\textnormal{ %sReed-Solomon Code over } %s"\ % (self.length(), self.dimension(), self.minimum_distance(), "Generalized " if self.is_generalized() else "", self.base_field()._latex_()) def minimum_distance(self): r""" Return the minimum distance between any two words in ``self``. Since a GRS code is always Maximum-Distance-Separable (MDS), this returns ``C.length() - C.dimension() + 1``. EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: C.minimum_distance() 29 """ return self.length() - self.dimension() + 1 def evaluation_points(self): r""" Return the vector of field elements used for the polynomial evaluations. EXAMPLES:: sage: F = GF(11) sage: n, k = 10, 5 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: C.evaluation_points() (0, 1, 2, 3, 4, 5, 6, 7, 8, 9) """ return self._evaluation_points def column_multipliers(self): r""" Return the vector of column multipliers of ``self``. EXAMPLES:: sage: F = GF(11) sage: n, k = 10, 5 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: C.column_multipliers() (1, 1, 1, 1, 1, 1, 1, 1, 1, 1) """ return self._column_multipliers def is_generalized(self): r""" Return whether ``self`` is a Generalized Reed-Solomon code or a regular Reed-Solomon code. ``self`` is a Generalized Reed-Solomon code if its column multipliers are not all 1. EXAMPLES:: sage: F = GF(11) sage: n, k = 10, 5 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: C.column_multipliers() (1, 1, 1, 1, 1, 1, 1, 1, 1, 1) sage: C.is_generalized() False sage: colmults = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 1] sage: C2 = codes.GeneralizedReedSolomonCode(F.list()[:n], k, colmults) sage: C2.is_generalized() True """ return not all( beta.is_one() for beta in self.column_multipliers() ) @cached_method def multipliers_product(self): r""" Return the component-wise product of the column multipliers of ``self`` with the column multipliers of the dual GRS code. This is a simple Cramer's rule-like expression on the evaluation points of ``self``. Recall that the column multipliers of the dual GRS code are also the column multipliers of the parity check matrix of ``self``. EXAMPLES:: sage: F = GF(11) sage: n, k = 10, 5 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: C.multipliers_product() [10, 9, 8, 7, 6, 5, 4, 3, 2, 1] """ a = self.evaluation_points() one = self.base_ring().one() return [one / prod(ai - ah for h, ah in enumerate(a) if h != i) for i, ai in enumerate(a)] @cached_method def parity_column_multipliers(self): r""" Return the list of column multipliers of the parity check matrix of ``self``. They are also column multipliers of the generator matrix for the dual GRS code of ``self``. 
EXAMPLES:: sage: F = GF(11) sage: n, k = 10, 5 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: C.parity_column_multipliers() [10, 9, 8, 7, 6, 5, 4, 3, 2, 1] """ n = self.length() col_mults = self.column_multipliers() etas = self.multipliers_product() return [etas[i] / col_mults[i] for i in range(n)] @cached_method def parity_check_matrix(self): r""" Return the parity check matrix of ``self``. EXAMPLES:: sage: F = GF(11) sage: n, k = 10, 5 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: C.parity_check_matrix() [10 9 8 7 6 5 4 3 2 1] [ 0 9 5 10 2 3 2 10 5 9] [ 0 9 10 8 8 4 1 4 7 4] [ 0 9 9 2 10 9 6 6 1 3] [ 0 9 7 6 7 1 3 9 8 5] """ return self.dual_code().generator_matrix() @cached_method def dual_code(self): r""" Return the dual code of ``self``, which is also a GRS code. EXAMPLES:: sage: F = GF(59) sage: colmults = [ F.random_element() for i in range(40) ] sage: C = codes.GeneralizedReedSolomonCode(F.list()[:40], 12, colmults) sage: Cd = C.dual_code(); Cd [40, 28, 13] Generalized Reed-Solomon Code over GF(59) The dual code of the dual code is the original code:: sage: C == Cd.dual_code() True """ col_mults = self.parity_column_multipliers() return GeneralizedReedSolomonCode(self.evaluation_points(), self.length() - self.dimension(), col_mults) def covering_radius(self): r""" Return the covering radius of ``self``. The covering radius of a linear code `C` is the smallest number `r` s.t. any element of the ambient space of `C` is at most at distance `r` to `C`. As GRS codes are Maximum Distance Separable codes (MDS), their covering radius is always `d-1`, where `d` is the minimum distance. This is opposed to random linear codes where the covering radius is computationally hard to determine. EXAMPLES:: sage: F = GF(2^8, 'a') sage: n, k = 256, 100 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: C.covering_radius() 156 """ return self.length() - self.dimension() @cached_method def weight_distribution(self): r""" Return the list whose `i`'th entry is the number of words of weight `i` in ``self``. Computing the weight distribution for a GRS code is very fast. Note that for random linear codes, it is computationally hard. EXAMPLES:: sage: F = GF(11) sage: n, k = 10, 5 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: C.weight_distribution() [1, 0, 0, 0, 0, 0, 2100, 6000, 29250, 61500, 62200] TESTS: Test that this method agrees with the generic algorithm:: sage: F = GF(7) sage: C = codes.GeneralizedReedSolomonCode(F.list(), 3) sage: C.weight_distribution() == super(codes.GeneralizedReedSolomonCode, C).weight_distribution() # long time True sage: F = GF(8) sage: C = codes.GeneralizedReedSolomonCode(F.list(), 3) sage: C.weight_distribution() == super(codes.GeneralizedReedSolomonCode, C).weight_distribution() # long time True """ d = self.minimum_distance() n = self.length() q = self.base_ring().order() s = var('s') wd = [1] + [0] * (d - 1) for i in range(d, n+1): tmp = binomial(n, i) * (q - 1) wd.append(tmp * symbolic_sum(binomial(i-1, s) * (-1)**s * q**(i - d - s), s, 0, i-d)) return wd def _punctured_form(self, points): r""" Return a representation of ``self`` as a :class:`GeneralizedReedSolomonCode` punctured in ``points``. 
INPUT: - ``points`` -- a set of positions where to puncture ``self`` EXAMPLES:: sage: C_grs = codes.GeneralizedReedSolomonCode(GF(59).list()[:40], 12) sage: C_grs._punctured_form({4, 3}) [38, 12, 27] Reed-Solomon Code over GF(59) """ if not isinstance(points, (Integer, int, set)): raise TypeError("points must be either a Sage Integer, a Python int, or a set") alphas = list(self.evaluation_points()) col_mults = list(self.column_multipliers()) n = self.length() punctured_alphas = [] punctured_col_mults = [] punctured_alphas = [alphas[i] for i in range(n) if i not in points] punctured_col_mults = [col_mults[i] for i in range(n) if i not in points] G = self.generator_matrix() G = G.delete_columns(list(points)) dimension = G.rank() return GeneralizedReedSolomonCode(punctured_alphas, dimension, punctured_col_mults) def decode_to_message(self, r): r""" Decode ``r`` to an element in message space of ``self``. .. NOTE:: If the code associated to ``self`` has the same length as its dimension, ``r`` will be unencoded as is. In that case, if ``r`` is not a codeword, the output is unspecified. INPUT: - ``r`` -- a codeword of ``self`` OUTPUT: - a vector of ``self`` message space EXAMPLES:: sage: F = GF(11) sage: n, k = 10, 5 sage: C = codes.GeneralizedReedSolomonCode(F.list()[1:n+1], k) sage: r = vector(F, (8, 2, 6, 10, 6, 10, 7, 6, 7, 2)) sage: C.decode_to_message(r) (3, 6, 6, 3, 1) """ if self.length() == self.dimension(): return self.encoder().unencode_nocheck(r) return vector(self.decoder().decode_to_message(r)) ####################### encoders ############################### class GRSEvaluationVectorEncoder(Encoder): r""" Encoder for (Generalized) Reed-Solomon codes that encodes vectors into codewords. Let `C` be a GRS code of length `n` and dimension `k` over some finite field `F`. We denote by `\alpha_i` its evaluations points and by `\beta_i` its column multipliers, where `1 \leq i \leq n`. Let `m = (m_1, \dots, m_k)`, a vector over `F`, be the message. We build a polynomial using the coordinates of `m` as coefficients: .. MATH:: p = \Sigma_{i=1}^{m} m_i \times x^i. The encoding of `m` will be the following codeword: .. MATH:: (\beta_1 \times p(\alpha_1), \dots, \beta_n \times p(\alpha_n)). INPUT: - ``code`` -- the associated code of this encoder EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: E = codes.encoders.GRSEvaluationVectorEncoder(C) sage: E Evaluation vector-style encoder for [40, 12, 29] Reed-Solomon Code over GF(59) Actually, we can construct the encoder from ``C`` directly:: sage: E = C.encoder("EvaluationVector") sage: E Evaluation vector-style encoder for [40, 12, 29] Reed-Solomon Code over GF(59) """ def __init__(self, code): r""" EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: E = codes.encoders.GRSEvaluationVectorEncoder(C) sage: E Evaluation vector-style encoder for [40, 12, 29] Reed-Solomon Code over GF(59) """ super(GRSEvaluationVectorEncoder, self).__init__(code) def __eq__(self, other): r""" Test equality between GRSEvaluationVectorEncoder objects. 
EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D1 = codes.encoders.GRSEvaluationVectorEncoder(C) sage: D2 = codes.encoders.GRSEvaluationVectorEncoder(C) sage: D1.__eq__(D2) True sage: D1 is D2 False """ return isinstance(other, GRSEvaluationVectorEncoder) \ and self.code() == other.code() def _repr_(self): r""" Return a string representation of ``self``. EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: E = codes.encoders.GRSEvaluationVectorEncoder(C) sage: E Evaluation vector-style encoder for [40, 12, 29] Reed-Solomon Code over GF(59) """ return "Evaluation vector-style encoder for %s" % self.code() def _latex_(self): r""" Return a latex representation of ``self``. EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: E = codes.encoders.GRSEvaluationVectorEncoder(C) sage: latex(E) \textnormal{Evaluation vector-style encoder for }[40, 12, 29] \textnormal{ Reed-Solomon Code over } \Bold{F}_{59} """ return "\\textnormal{Evaluation vector-style encoder for }%s" % self.code()._latex_() @cached_method def generator_matrix(self): r""" Return a generator matrix of ``self`` Considering a GRS code of length `n`, dimension `k`, with evaluation points `(\alpha_1, \dots, \alpha_n)` and column multipliers `(\beta_1, \dots, \beta_n)`, its generator matrix `G` is built using the following formula: .. MATH:: G = [g_{i,j}], g_{i,j} = \beta_j \times \alpha_{j}^{i}. This matrix is a Vandermonde matrix. EXAMPLES:: sage: F = GF(11) sage: n, k = 10, 5 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: E = codes.encoders.GRSEvaluationVectorEncoder(C) sage: E.generator_matrix() [1 1 1 1 1 1 1 1 1 1] [0 1 2 3 4 5 6 7 8 9] [0 1 4 9 5 3 3 5 9 4] [0 1 8 5 9 4 7 2 6 3] [0 1 5 4 3 9 9 3 4 5] """ C = self.code() alphas = C.evaluation_points() col_mults = C.column_multipliers() g = matrix(C.base_field(), C.dimension(), C.length(), lambda i,j: col_mults[j] * alphas[j]**i) g.set_immutable() return g class GRSEvaluationPolynomialEncoder(Encoder): r""" Encoder for (Generalized) Reed-Solomon codes which uses evaluation of polynomials to obtain codewords. Let `C` be a GRS code of length `n` and dimension `k` over some finite field `F`. We denote by `\alpha_i` its evaluations points and by `\beta_i` its column multipliers, where `1 \leq i \leq n`. Let `p` be a polynomial of degree at most `k-1` in `F[x]` be the message. The encoding of `m` will be the following codeword: .. MATH:: (\beta_1 \times p(\alpha_1), \dots, \beta_n \times p(\alpha_n)). 
INPUT: - ``code`` -- the associated code of this encoder - ``polynomial_ring`` -- (default: ``None``) a polynomial ring to specify the message space of ``self``, if needed; it is set to `F[x]` (where `F` is the base field of ``code``) if default value is kept EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: E = codes.encoders.GRSEvaluationPolynomialEncoder(C) sage: E Evaluation polynomial-style encoder for [40, 12, 29] Reed-Solomon Code over GF(59) sage: E.message_space() Univariate Polynomial Ring in x over Finite Field of size 59 Actually, we can construct the encoder from ``C`` directly:: sage: E = C.encoder("EvaluationPolynomial") sage: E Evaluation polynomial-style encoder for [40, 12, 29] Reed-Solomon Code over GF(59) We can also specify another polynomial ring:: sage: R = PolynomialRing(F, 'y') sage: E = C.encoder("EvaluationPolynomial", polynomial_ring=R) sage: E.message_space() Univariate Polynomial Ring in y over Finite Field of size 59 """ def __init__(self, code, polynomial_ring=None): r""" TESTS: If ``polynomial_ring`` is not a polynomial ring, an exception is raised:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: E = codes.encoders.GRSEvaluationPolynomialEncoder(C, polynomial_ring = F) Traceback (most recent call last): ... ValueError: polynomial_ring has to be a univariate polynomial ring Same if ``polynomial_ring`` is a multivariate polynomial ring:: sage: Fxy.<x,y> = F[] sage: E = codes.encoders.GRSEvaluationPolynomialEncoder(C, polynomial_ring = Fxy) Traceback (most recent call last): ... ValueError: polynomial_ring has to be a univariate polynomial ring ``polynomial_ring``'s base field and ``code``'s base field have to be the same:: sage: Gx.<x> = GF(7)[] sage: E = codes.encoders.GRSEvaluationPolynomialEncoder(C, polynomial_ring = Gx) Traceback (most recent call last): ... ValueError: polynomial_ring's base field has to be the same as code's """ from sage.rings.polynomial.polynomial_ring import PolynomialRing_commutative super(GRSEvaluationPolynomialEncoder, self).__init__(code) if polynomial_ring is None: self._polynomial_ring = code.base_field()['x'] else: if not isinstance(polynomial_ring, PolynomialRing_commutative): raise ValueError("polynomial_ring has to be a univariate polynomial ring") elif not len(polynomial_ring.variable_names()) == 1: raise ValueError("polynomial_ring has to be a univariate polynomial ring") if not polynomial_ring.base_ring() == code.base_field(): raise ValueError("polynomial_ring's base field has to be the same as code's") self._polynomial_ring = polynomial_ring def __eq__(self, other): r""" Test equality between GRSEvaluationPolynomialEncoder objects. EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D1 = codes.encoders.GRSEvaluationPolynomialEncoder(C) sage: D2 = codes.encoders.GRSEvaluationPolynomialEncoder(C) sage: D1 is D2 False sage: D1.__eq__(D2) True sage: R = PolynomialRing(F, 'y') sage: D3 = codes.encoders.GRSEvaluationPolynomialEncoder(C, polynomial_ring=R) sage: D1.__eq__(D3) False """ return (isinstance(other, GRSEvaluationPolynomialEncoder) and self.code() == other.code() and self.polynomial_ring() == other.polynomial_ring()) def _repr_(self): r""" Return a string representation of ``self``. 
EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: E = C.encoder("EvaluationPolynomial") sage: E Evaluation polynomial-style encoder for [40, 12, 29] Reed-Solomon Code over GF(59) """ return "Evaluation polynomial-style encoder for %s" % self.code() def _latex_(self): r""" Return a latex representation of ``self``. EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: E = C.encoder("EvaluationPolynomial") sage: latex(E) \textnormal{Evaluation polynomial-style encoder for }[40, 12, 29] \textnormal{ Reed-Solomon Code over } \Bold{F}_{59} """ return "\\textnormal{Evaluation polynomial-style encoder for }%s" % self.code()._latex_() def encode(self, p): r""" Transform the polynomial ``p`` into a codeword of :meth:`code`. One can use the following shortcut to encode a word with an encoder ``E``:: E(word) INPUT: - ``p`` -- a polynomial from the message space of ``self`` of degree less than ``self.code().dimension()`` OUTPUT: - a codeword in associated code of ``self`` EXAMPLES:: sage: F = GF(11) sage: Fx.<x> = F[] sage: n, k = 10 , 5 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: E = C.encoder("EvaluationPolynomial") sage: p = x^2 + 3*x + 10 sage: c = E.encode(p); c (10, 3, 9, 6, 5, 6, 9, 3, 10, 8) sage: c in C True If a polynomial of too high degree is given, an error is raised:: sage: p = x^10 sage: E.encode(p) Traceback (most recent call last): ... ValueError: The polynomial to encode must have degree at most 4 If ``p`` is not an element of the proper polynomial ring, an error is raised:: sage: Qy.<y> = QQ[] sage: p = y^2 + 1 sage: E.encode(p) Traceback (most recent call last): ... ValueError: The value to encode must be in Univariate Polynomial Ring in x over Finite Field of size 11 TESTS: The bug described in :trac:`20744` is now fixed:: sage: F = GF(11) sage: Fm.<my_variable> = F[] sage: n, k = 10 , 5 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: E = C.encoder("EvaluationPolynomial", polynomial_ring = Fm) sage: p = my_variable^2 + 3*my_variable + 10 sage: c = E.encode(p) sage: c in C True """ M = self.message_space() if p not in M: raise ValueError("The value to encode must be in %s" % M) C = self.code() if p.degree() >= C.dimension(): raise ValueError("The polynomial to encode must have degree at most %s" % (C.dimension() - 1)) alphas = C.evaluation_points() col_mults = C.column_multipliers() c = vector(C.base_ring(), [col_mults[i]*p(alphas[i]) for i in range(C.length())]) return c def unencode_nocheck(self, c): r""" Return the message corresponding to the codeword ``c``. Use this method with caution: it does not check if ``c`` belongs to the code, and if this is not the case, the output is unspecified. Instead, use :meth:`unencode`. 
INPUT: - ``c`` -- a codeword of :meth:`code` OUTPUT: - a polynomial of degree less than ``self.code().dimension()`` EXAMPLES:: sage: F = GF(11) sage: n, k = 10 , 5 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: E = C.encoder("EvaluationPolynomial") sage: c = vector(F, (10, 3, 9, 6, 5, 6, 9, 3, 10, 8)) sage: c in C True sage: p = E.unencode_nocheck(c); p x^2 + 3*x + 10 sage: E.encode(p) == c True Note that no error is thrown if ``c`` is not a codeword, and that the result is undefined:: sage: c = vector(F, (11, 3, 9, 6, 5, 6, 9, 3, 10, 8)) sage: c in C False sage: p = E.unencode_nocheck(c); p 6*x^4 + 6*x^3 + 2*x^2 sage: E.encode(p) == c False """ C = self.code() alphas = C.evaluation_points() col_mults = C.column_multipliers() c = [c[i]/col_mults[i] for i in range(C.length())] points = [(alphas[i], c[i]) for i in range(C.dimension())] Pc = self.polynomial_ring().lagrange_polynomial(points) return Pc def message_space(self): r""" Return the message space of ``self`` EXAMPLES:: sage: F = GF(11) sage: n, k = 10 , 5 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: E = C.encoder("EvaluationPolynomial") sage: E.message_space() Univariate Polynomial Ring in x over Finite Field of size 11 """ return self._polynomial_ring polynomial_ring = message_space ####################### decoders ############################### class GRSBerlekampWelchDecoder(Decoder): r""" Decoder for (Generalized) Reed-Solomon codes which uses Berlekamp-Welch decoding algorithm to correct errors in codewords. This algorithm recovers the error locator polynomial by solving a linear system. See [HJ2004]_ pp. 51-52 for details. INPUT: - ``code`` -- a code associated to this decoder EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSBerlekampWelchDecoder(C) sage: D Berlekamp-Welch decoder for [40, 12, 29] Reed-Solomon Code over GF(59) Actually, we can construct the decoder from ``C`` directly:: sage: D = C.decoder("BerlekampWelch") sage: D Berlekamp-Welch decoder for [40, 12, 29] Reed-Solomon Code over GF(59) """ def __init__(self, code): r""" TESTS: If ``code`` is not a GRS code, an error is raised:: sage: C = codes.random_linear_code(GF(11), 10, 4) sage: codes.decoders.GRSBerlekampWelchDecoder(C) Traceback (most recent call last): ... ValueError: code has to be a generalized Reed-Solomon code """ if not isinstance(code, GeneralizedReedSolomonCode): raise ValueError("code has to be a generalized Reed-Solomon code") super(GRSBerlekampWelchDecoder, self).__init__(code, code.ambient_space(), "EvaluationPolynomial") def __eq__(self, other): r""" Test equality between GRSBerlekampWelchDecoder objects. EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D1 = codes.decoders.GRSBerlekampWelchDecoder(C) sage: D2 = codes.decoders.GRSBerlekampWelchDecoder(C) sage: D1.__eq__(D2) True sage: D1 is D2 False """ return (isinstance(other, GRSBerlekampWelchDecoder) and self.code() == other.code() and self.input_space() == other.input_space()) def _repr_(self): r""" Return a string representation of ``self``. EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSBerlekampWelchDecoder(C) sage: D Berlekamp-Welch decoder for [40, 12, 29] Reed-Solomon Code over GF(59) """ return "Berlekamp-Welch decoder for %s" % self.code() def _latex_(self): r""" Return a latex representation of ``self``. 
EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSBerlekampWelchDecoder(C) sage: latex(D) \textnormal{Berlekamp Welch decoder for }[40, 12, 29] \textnormal{ Reed-Solomon Code over } \Bold{F}_{59} """ return "\\textnormal{Berlekamp Welch decoder for }%s"\ % self.code()._latex_() def _decode_to_code_and_message(self, r): r""" Decode ``r`` to an element in message space of ``self`` and its representation in the ambient space of the code associated to ``self``. INPUT: - ``r`` -- a codeword of ``self`` OUTPUT: - a pair ``(c, f)``, where * ``c`` is the representation of ``r`` decoded in the ambient space of the associated code of ``self`` *``f`` its representation in the message space of ``self`` EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSBerlekampWelchDecoder(C) sage: c = C.random_element() sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), D.decoding_radius()) sage: y = Chan(c) sage: c_dec, f_dec = D._decode_to_code_and_message(y) sage: f_dec == D.connected_encoder().unencode(c) True sage: c_dec == c True """ C = self.code() if r not in C.ambient_space(): raise ValueError("The word to decode has to be in the ambient space of the code") n, k = C.length(), C.dimension() if n == k: return r, self.connected_encoder().unencode_nocheck(r) if r in C: return r, self.connected_encoder().unencode_nocheck(r) col_mults = C.column_multipliers() r_list = copy(r) r_list = [r[i]/col_mults[i] for i in range(0, C.length())] t = (C.minimum_distance()-1) // 2 l0 = n-1-t l1 = n-1-t-(k-1) S = matrix(C.base_field(), n, l0+l1+2, lambda i,j : (C.evaluation_points()[i])**j if j<(l0+1) else r_list[i]*(C.evaluation_points()[i])**(j-(l0+1))) S = S.right_kernel() S = S.basis_matrix().row(0) R = C.base_field()['x'] Q0 = R(S.list_from_positions(range(l0 + 1))) Q1 = R(S.list_from_positions(range(l0 + 1, l0 + l1 + 2))) f, rem = (-Q0).quo_rem(Q1) if not rem.is_zero(): raise DecodingError("Decoding failed because the number of errors exceeded the decoding radius") if f not in R: raise DecodingError("Decoding failed because the number of errors exceeded the decoding radius") c = self.connected_encoder().encode(f) if (c - r).hamming_weight() > self.decoding_radius(): raise DecodingError("Decoding failed because the number of errors exceeded the decoding radius") return c, f def decode_to_message(self, r): r""" Decode ``r`` to an element in message space of ``self``. .. NOTE:: If the code associated to ``self`` has the same length as its dimension, ``r`` will be unencoded as is. In that case, if ``r`` is not a codeword, the output is unspecified. INPUT: - ``r`` -- a codeword of ``self`` OUTPUT: - a vector of ``self`` message space EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSBerlekampWelchDecoder(C) sage: c = C.random_element() sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), D.decoding_radius()) sage: y = Chan(c) sage: D.connected_encoder().unencode(c) == D.decode_to_message(y) True TESTS: If one tries to decode a word which is too far from any codeword, an exception is raised:: sage: e = vector(F,[0, 0, 54, 23, 1, 0, 0, 0, 53, 21, 0, 0, 0, 34, 6, 11, 0, 0, 16, 0, 0, 0, 9, 0, 10, 27, 35, 0, 0, 0, 0, 46, 0, 0, 0, 0, 0, 0, 44, 0]); e.hamming_weight() 15 sage: D.decode_to_message(c + e) Traceback (most recent call last): ... 
DecodingError: Decoding failed because the number of errors exceeded the decoding radius If one tries to decode something which is not in the ambient space of the code, an exception is raised:: sage: D.decode_to_message(42) Traceback (most recent call last): ... ValueError: The word to decode has to be in the ambient space of the code The bug detailed in :trac:`20340` has been fixed:: sage: C = codes.GeneralizedReedSolomonCode(GF(59).list()[:40], 12) sage: c = C.random_element() sage: D = C.decoder("BerlekampWelch") sage: E = D.connected_encoder() sage: m = E.message_space().random_element() sage: c = E.encode(m) sage: D.decode_to_message(c) == m True """ return self._decode_to_code_and_message(r)[1] def decode_to_code(self, r): r""" Correct the errors in ``r`` and returns a codeword. .. NOTE:: If the code associated to ``self`` has the same length as its dimension, ``r`` will be returned as is. INPUT: - ``r`` -- a vector of the ambient space of ``self.code()`` OUTPUT: - a vector of ``self.code()`` EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSBerlekampWelchDecoder(C) sage: c = C.random_element() sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), D.decoding_radius()) sage: y = Chan(c) sage: c == D.decode_to_code(y) True TESTS: If one tries to decode a word which is too far from any codeword, an exception is raised:: sage: e = vector(F,[0, 0, 54, 23, 1, 0, 0, 0, 53, 21, 0, 0, 0, 34, 6, 11, 0, 0, 16, 0, 0, 0, 9, 0, 10, 27, 35, 0, 0, 0, 0, 46, 0, 0, 0, 0, 0, 0, 44, 0]); e.hamming_weight() 15 sage: D.decode_to_code(c + e) Traceback (most recent call last): ... DecodingError: Decoding failed because the number of errors exceeded the decoding radius If one tries to decode something which is not in the ambient space of the code, an exception is raised:: sage: D.decode_to_code(42) Traceback (most recent call last): ... ValueError: The word to decode has to be in the ambient space of the code The bug detailed in :trac:`20340` has been fixed:: sage: C = codes.GeneralizedReedSolomonCode(GF(59).list()[:40], 12) sage: c = C.random_element() sage: D = C.decoder("BerlekampWelch") sage: D.decode_to_code(c) == c True """ return self._decode_to_code_and_message(r)[0] def decoding_radius(self): r""" Return maximal number of errors that ``self`` can decode. OUTPUT: - the number of errors as an integer EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSBerlekampWelchDecoder(C) sage: D.decoding_radius() 14 """ return (self.code().minimum_distance()-1)//2 class GRSGaoDecoder(Decoder): r""" Decoder for (Generalized) Reed-Solomon codes which uses Gao decoding algorithm to correct errors in codewords. Gao decoding algorithm uses early terminated extended Euclidean algorithm to find the error locator polynomial. See [Ga02]_ for details. 
INPUT: - ``code`` -- the associated code of this decoder EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSGaoDecoder(C) sage: D Gao decoder for [40, 12, 29] Reed-Solomon Code over GF(59) Actually, we can construct the decoder from ``C`` directly:: sage: D = C.decoder("Gao") sage: D Gao decoder for [40, 12, 29] Reed-Solomon Code over GF(59) """ def __init__(self, code): r""" TESTS: If ``code`` is not a GRS code, an error is raised:: sage: C = codes.random_linear_code(GF(11), 10, 4) sage: codes.decoders.GRSGaoDecoder(C) Traceback (most recent call last): ... ValueError: code has to be a generalized Reed-Solomon code """ if not isinstance(code, GeneralizedReedSolomonCode): raise ValueError("code has to be a generalized Reed-Solomon code") super(GRSGaoDecoder, self).__init__(code, code.ambient_space(), "EvaluationPolynomial") def __eq__(self, other): r""" Test equality of GRSGaoDecoder objects. EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D1 = codes.decoders.GRSGaoDecoder(C) sage: D2 = codes.decoders.GRSGaoDecoder(C) sage: D1.__eq__(D2) True sage: D1 is D2 False """ return (isinstance(other, GRSGaoDecoder) and self.code() == other.code() and self.input_space() == other.input_space()) def _repr_(self): r""" Return a string representation of ``self``. EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSGaoDecoder(C) sage: D Gao decoder for [40, 12, 29] Reed-Solomon Code over GF(59) """ return "Gao decoder for %s" % self.code() def _latex_(self): r""" Return a latex representation of ``self``. EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSGaoDecoder(C) sage: latex(D) \textnormal{Gao decoder for }[40, 12, 29] \textnormal{ Reed-Solomon Code over } \Bold{F}_{59} """ return "\\textnormal{Gao decoder for }%s" % self.code()._latex_() @cached_method def _polynomial_vanishing_at_alphas(self, PolRing): r""" Return the unique minimal-degree polynomial vanishing at all the evaluation points. INPUT: - ``PolRing`` -- polynomial ring of the output OUTPUT: - a polynomial over ``PolRing`` EXAMPLES:: sage: F = GF(11) sage: n, k = 10, 5 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSGaoDecoder(C) sage: P = PolynomialRing(F,'x') sage: D._polynomial_vanishing_at_alphas(P) x^10 + 10*x^9 + x^8 + 10*x^7 + x^6 + 10*x^5 + x^4 + 10*x^3 + x^2 + 10*x """ alphas = self.code().evaluation_points() G = PolRing.one() x = PolRing.gen() for i in range(0, self.code().length()): G = G*(x-self.code().evaluation_points()[i]) return G def _partial_xgcd(self, a, b, PolRing): r""" Performs an Euclidean algorithm on ``a`` and ``b`` until a remainder has degree less than `\frac{n+k}{2}`, `n` being the dimension of the code, `k` its dimension, and returns `(r, s)` such that in the step just before termination, `r = a\times s + b\times t`. 
INPUT: - ``a, b`` -- polynomials over ``PolRing`` - ``PolRing`` -- polynomial ring of the output OUTPUT: - a tuple of polynomials EXAMPLES:: sage: F = GF(11) sage: n, k = 10, 5 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSGaoDecoder(C) sage: P = PolynomialRing(F,'x') sage: x = P.parameter() sage: a = 5*x^2 + 9*x + 8 sage: b = 10*x^2 + 3*x + 5 sage: D._partial_xgcd(a, b, P) (10*x^2 + 3*x + 5, 1) """ stop = floor(self.code().dimension() + self.code().length()) // 2 s = PolRing.one() prev_s = PolRing.zero() r = b prev_r = a while(r.degree() >= stop): q = prev_r.quo_rem(r)[0] (prev_r, r) = (r, prev_r - q * r) (prev_s, s) = (s, prev_s - q * s) return (r, s) def _decode_to_code_and_message(self, r): r""" Decode ``r`` to an element in message space of ``self`` and its representation in the ambient space of the code associated to ``self``. INPUT: - ``r`` -- a codeword of ``self`` OUTPUT: - ``(c, h)`` -- ``c`` is the representation of ``r`` decoded in the ambient space of the associated code of ``self``, ``h`` its representation in the message space of ``self``. EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSGaoDecoder(C) sage: c = C.random_element() sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), D.decoding_radius()) sage: y = Chan(c) sage: c_dec, h_dec = D._decode_to_code_and_message(y) sage: h_dec == D.connected_encoder().unencode(c) True sage: c_dec == c True """ C = self.code() if r not in C.ambient_space(): raise ValueError("The word to decode has to be in the ambient space of the code") alphas = C.evaluation_points() col_mults = C.column_multipliers() PolRing = C.base_field()['x'] G = self._polynomial_vanishing_at_alphas(PolRing) n = C.length() if n == C.dimension() or r in C: return r, self.connected_encoder().unencode_nocheck(r) points = [(alphas[i], r[i]/col_mults[i]) for i in range(0, n)] R = PolRing.lagrange_polynomial(points) (Q1, Q0) = self._partial_xgcd(G, R, PolRing) h, rem = Q1.quo_rem(Q0) if not rem.is_zero(): raise DecodingError("Decoding failed because the number of errors exceeded the decoding radius") if h not in PolRing: raise DecodingError("Decoding failed because the number of errors exceeded the decoding radius") c = self.connected_encoder().encode(h) if (c - r).hamming_weight() > self.decoding_radius(): raise DecodingError("Decoding failed because the number of errors exceeded the decoding radius") return c, h def decode_to_message(self, r): r""" Decode ``r`` to an element in message space of ``self``. .. NOTE:: If the code associated to ``self`` has the same length as its dimension, ``r`` will be unencoded as is. In that case, if ``r`` is not a codeword, the output is unspecified. 
INPUT: - ``r`` -- a codeword of ``self`` OUTPUT: - a vector of ``self`` message space EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSGaoDecoder(C) sage: c = C.random_element() sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), D.decoding_radius()) sage: y = Chan(c) sage: D.connected_encoder().unencode(c) == D.decode_to_message(y) True TESTS: If one tries to decode a word which is too far from any codeword, an exception is raised:: sage: e = vector(F,[0, 0, 54, 23, 1, 0, 0, 0, 53, 21, 0, 0, 0, 34, 6, 11, 0, 0, 16, 0, 0, 0, 9, 0, 10, 27, 35, 0, 0, 0, 0, 46, 0, 0, 0, 0, 0, 0, 44, 0]); e.hamming_weight() 15 sage: D.decode_to_message(c + e) Traceback (most recent call last): ... DecodingError: Decoding failed because the number of errors exceeded the decoding radius If one tries to decode something which is not in the ambient space of the code, an exception is raised:: sage: D.decode_to_message(42) Traceback (most recent call last): ... ValueError: The word to decode has to be in the ambient space of the code The bug detailed in :trac:`20340` has been fixed:: sage: C = codes.GeneralizedReedSolomonCode(GF(59).list()[:40], 12) sage: c = C.random_element() sage: D = C.decoder("Gao") sage: E = D.connected_encoder() sage: m = E.message_space().random_element() sage: c = E.encode(m) sage: D.decode_to_message(c) == m True """ return self._decode_to_code_and_message(r)[1] def decode_to_code(self, r): r""" Correct the errors in ``r`` and returns a codeword. .. NOTE:: If the code associated to ``self`` has the same length as its dimension, ``r`` will be returned as is. INPUT: - ``r`` -- a vector of the ambient space of ``self.code()`` OUTPUT: - a vector of ``self.code()`` EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSGaoDecoder(C) sage: c = C.random_element() sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), D.decoding_radius()) sage: y = Chan(c) sage: c == D.decode_to_code(y) True TESTS: If one tries to decode a word which is too far from any codeword, an exception is raised:: sage: e = vector(F,[0, 0, 54, 23, 1, 0, 0, 0, 53, 21, 0, 0, 0, 34, 6, 11, 0, 0, 16, 0, 0, 0, 9, 0, 10, 27, 35, 0, 0, 0, 0, 46, 0, 0, 0, 0, 0, 0, 44, 0]); e.hamming_weight() 15 sage: D.decode_to_code(c + e) Traceback (most recent call last): ... DecodingError: Decoding failed because the number of errors exceeded the decoding radius If one tries to decode something which is not in the ambient space of the code, an exception is raised:: sage: D.decode_to_code(42) Traceback (most recent call last): ... 
ValueError: The word to decode has to be in the ambient space of the code The bug detailed in :trac:`20340` has been fixed:: sage: C = codes.GeneralizedReedSolomonCode(GF(59).list()[:40], 12) sage: c = C.random_element() sage: D = C.decoder("Gao") sage: c = C.random_element() sage: D.decode_to_code(c) == c True """ return self._decode_to_code_and_message(r)[0] def decoding_radius(self): r""" Return maximal number of errors that ``self`` can decode OUTPUT: - the number of errors as an integer EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSGaoDecoder(C) sage: D.decoding_radius() 14 """ return (self.code().minimum_distance() - 1) // 2 class GRSErrorErasureDecoder(Decoder): r""" Decoder for (Generalized) Reed-Solomon codes which is able to correct both errors and erasures in codewords. Let `C` be a GRS code of length `n` and dimension `k`. Considering `y` a codeword with at most `t` errors (`t` being the `\left\lfloor \frac{d-1}{2} \right\rfloor` decoding radius), and `e` the erasure vector, this decoder works as follows: - Puncture the erased coordinates which are identified in `e`. - Create a new GRS code of length `n - w(e)`, where `w` is the Hamming weight function, and dimension `k`. - Use Gao decoder over this new code one the punctured word built on the first step. - Recover the original message from the decoded word computed on the previous step. - Encode this message using an encoder over `C`. INPUT: - ``code`` -- the associated code of this decoder EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSErrorErasureDecoder(C) sage: D Error-Erasure decoder for [40, 12, 29] Reed-Solomon Code over GF(59) Actually, we can construct the decoder from ``C`` directly:: sage: D = C.decoder("ErrorErasure") sage: D Error-Erasure decoder for [40, 12, 29] Reed-Solomon Code over GF(59) """ def __init__(self, code): r""" TESTS: If ``code`` is not a GRS code, an error is raised:: sage: C = codes.random_linear_code(GF(11), 10, 4) sage: codes.decoders.GRSErrorErasureDecoder(C) Traceback (most recent call last): ... ValueError: code has to be a generalized Reed-Solomon code """ if not isinstance(code, GeneralizedReedSolomonCode): raise ValueError("code has to be a generalized Reed-Solomon code") input_space = cartesian_product([code.ambient_space(), VectorSpace(GF(2), code.ambient_space().dimension())]) super(GRSErrorErasureDecoder, self).__init__(code, input_space, "EvaluationVector") def __eq__(self, other): r""" Test equality of GRSErrorErasureDecoder objects. EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D1 = codes.decoders.GRSErrorErasureDecoder(C) sage: D2 = codes.decoders.GRSErrorErasureDecoder(C) sage: D1.__eq__(D2) True sage: D1 is D2 False """ return isinstance(other, GRSErrorErasureDecoder) \ and self.code() == other.code() def _repr_(self): r""" Return a string representation of ``self``. EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSErrorErasureDecoder(C) sage: D Error-Erasure decoder for [40, 12, 29] Reed-Solomon Code over GF(59) """ return "Error-Erasure decoder for %s" % self.code() def _latex_(self): r""" Return a latex representation of ``self``. 
EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSErrorErasureDecoder(C) sage: latex(D) \textnormal{Error-Erasure decoder for }[40, 12, 29] \textnormal{ Reed-Solomon Code over } \Bold{F}_{59} """ return "\\textnormal{Error-Erasure decoder for }%s"\ % self.code()._latex_() def decode_to_message(self, word_and_erasure_vector): r""" Decode ``word_and_erasure_vector`` to an element in message space of ``self`` INPUT: - word_and_erasure_vector -- a tuple whose: * first element is an element of the ambient space of the code * second element is a vector over `\GF{2}` whose length is the same as the code's .. NOTE:: If the code associated to ``self`` has the same length as its dimension, ``r`` will be unencoded as is. If the number of erasures is exactly `n - k`, where `n` is the length of the code associated to ``self`` and `k` its dimension, ``r`` will be returned as is. In either case, if ``r`` is not a codeword, the output is unspecified. INPUT: - ``word_and_erasure_vector`` -- a pair of vectors, where first element is a codeword of ``self`` and second element is a vector of GF(2) containing erasure positions OUTPUT: - a vector of ``self`` message space EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSErrorErasureDecoder(C) sage: c = C.random_element() sage: n_era = randint(0, C.minimum_distance() - 2) sage: Chan = channels.ErrorErasureChannel(C.ambient_space(), D.decoding_radius(n_era), n_era) sage: y = Chan(c) sage: D.connected_encoder().unencode(c) == D.decode_to_message(y) True TESTS: If one tries to decode a word with too many erasures, it returns an exception:: sage: Chan = channels.ErrorErasureChannel(C.ambient_space(), 0, C.minimum_distance() + 1) sage: y = Chan(c) sage: D.decode_to_message(y) Traceback (most recent call last): ... DecodingError: Too many erasures in the received word If one tries to decode something which is not in the ambient space of the code, an exception is raised:: sage: D.decode_to_message((42, random_vector(GF(2), C.length()))) Traceback (most recent call last): ... ValueError: The word to decode has to be in the ambient space of the code If one tries to pass an erasure_vector which is not a vector over GF(2) of the same length as code's, an exception is raised:: sage: D.decode_to_message((C.random_element(), 42)) Traceback (most recent call last): ... 
ValueError: The erasure vector has to be a vector over GF(2) of the same length as the code """ C = self.code() word, erasure_vector = word_and_erasure_vector n, k = C.length(), C.dimension() if word not in C.ambient_space(): raise ValueError("The word to decode has to be in the ambient space of the code") if not erasure_vector in VectorSpace(GF(2), n): raise ValueError("The erasure vector has to be a vector over GF(2) of the same length as the code") if erasure_vector.hamming_weight() >= self.code().minimum_distance(): raise DecodingError("Too many erasures in the received word") punctured_word = vector(self.code().base_ring(), [word[i] for i in range(len(word)) if erasure_vector[i]!=1]) C1_length = len(punctured_word) if C1_length == k: return self.connected_encoder().unencode_nocheck(word) C1_evaluation_points = [self.code().evaluation_points()[i] for i in range(n) if erasure_vector[i]!=1] C1_column_multipliers = [self.code().column_multipliers()[i] for i in range(n) if erasure_vector[i]!=1] C1 = GeneralizedReedSolomonCode(C1_evaluation_points, k, C1_column_multipliers) return C1.decode_to_message(punctured_word) def decoding_radius(self, number_erasures): r""" Return maximal number of errors that ``self`` can decode according to how many erasures it receives. INPUT: - ``number_erasures`` -- the number of erasures when we try to decode OUTPUT: - the number of errors as an integer EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: D = codes.decoders.GRSErrorErasureDecoder(C) sage: D.decoding_radius(5) 11 If we receive too many erasures, it returns an exception as codeword will be impossible to decode:: sage: D.decoding_radius(30) Traceback (most recent call last): ... ValueError: The number of erasures exceed decoding capability """ diff = self.code().minimum_distance() - 1 - number_erasures if diff <= 0: raise ValueError("The number of erasures exceed decoding capability") else: return diff // 2 class GRSKeyEquationSyndromeDecoder(Decoder): r""" Decoder for (Generalized) Reed-Solomon codes which uses a Key equation decoding based on the syndrome polynomial to correct errors in codewords. This algorithm uses early terminated extended euclidean algorithm to solve the key equations, as described in [Rot2006]_, pp. 183-195. INPUT: - ``code`` -- The associated code of this decoder. EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[1:n+1], k) sage: D = codes.decoders.GRSKeyEquationSyndromeDecoder(C) sage: D Key equation decoder for [40, 12, 29] Reed-Solomon Code over GF(59) Actually, we can construct the decoder from ``C`` directly:: sage: D = C.decoder("KeyEquationSyndrome") sage: D Key equation decoder for [40, 12, 29] Reed-Solomon Code over GF(59) """ def __init__(self, code): r""" TESTS:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[:n], k) sage: codes.decoders.GRSKeyEquationSyndromeDecoder(C) Traceback (most recent call last): ... ValueError: Impossible to use this decoder over a GRS code which contains 0 amongst its evaluation points If ``code`` is not a GRS code, an error is raised:: sage: C = codes.random_linear_code(GF(11), 10, 4) sage: codes.decoders.GRSKeyEquationSyndromeDecoder(C) Traceback (most recent call last): ... 
ValueError: code has to be a generalized Reed-Solomon code """ if not isinstance(code, GeneralizedReedSolomonCode): raise ValueError("code has to be a generalized Reed-Solomon code") if code.base_field().zero() in code.evaluation_points(): raise ValueError("Impossible to use this decoder over a GRS code which contains 0 amongst its evaluation points") super(GRSKeyEquationSyndromeDecoder, self).__init__(code, code.ambient_space(), "EvaluationVector") def __eq__(self, other): r""" Test equality of GRSKeyEquationSyndromeDecoder objects. EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[1:n+1], k) sage: D1 = codes.decoders.GRSKeyEquationSyndromeDecoder(C) sage: D2 = codes.decoders.GRSKeyEquationSyndromeDecoder(C) sage: D1.__eq__(D2) True sage: D1 is D2 False """ return isinstance(other, GRSKeyEquationSyndromeDecoder) \ and self.code() == other.code()\ and self.input_space() == other.input_space() def _repr_(self): r""" Return a string representation of ``self``. EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[1:n+1], k) sage: D = codes.decoders.GRSKeyEquationSyndromeDecoder(C) sage: D Key equation decoder for [40, 12, 29] Reed-Solomon Code over GF(59) """ return "Key equation decoder for %s" % self.code() def _latex_(self): r""" Return a latex representation of ``self``. EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[1:n+1], k) sage: D = codes.decoders.GRSKeyEquationSyndromeDecoder(C) sage: latex(D) \textnormal{Key equation decoder for }[40, 12, 29] \textnormal{ Reed-Solomon Code over } \Bold{F}_{59} """ return "\\textnormal{Key equation decoder for }%s" % self.code()._latex_() def _partial_xgcd(self, a, b, PolRing): r""" Performs an Euclidean algorithm on ``a`` and ``b`` until a remainder has degree less than `\frac{n+k}{2}`, `n` being the dimension of the code, `k` its dimension, and returns `(r, t)` such that in the step just before termination, `r = a\times s + b\times t`. INPUT: - ``a, b`` -- polynomials over ``PolRing`` - ``PolRing`` -- polynomial ring of the output OUTPUT: - a tuple of polynomials EXAMPLES:: sage: F = GF(11) sage: n, k = 10, 5 sage: C = codes.GeneralizedReedSolomonCode(F.list()[1:n+1], k) sage: D = codes.decoders.GRSKeyEquationSyndromeDecoder(C) sage: P = PolynomialRing(F,'x') sage: x = P.parameter() sage: a = 5*x^2 + 9*x + 8 sage: b = 10*x^2 + 3*x + 5 sage: D._partial_xgcd(a, b, P) (5, 8*x + 10) """ prev_t = PolRing.zero() t = PolRing.one() prev_r = a r = b while(r.degree() >= t.degree()): q = prev_r.quo_rem(r)[0] prev_r, r = r, prev_r - q * r prev_t, t = t, prev_t - q * t return (r, t) def _syndrome(self, r): r""" Return the coefficients of the syndrome polynomial of ``r``. INPUT: - ``r`` -- a vector of the ambient space of ``self.code()`` OUTPUT: - a list EXAMPLES:: sage: F = GF(11) sage: n, k = 10, 5 sage: C = codes.GeneralizedReedSolomonCode(F.list()[1:n+1], k) sage: D = codes.decoders.GRSKeyEquationSyndromeDecoder(C) sage: r = vector(F, (8, 2, 6, 10, 6, 10, 7, 6, 7, 2)) sage: D._syndrome(r) [1, 10, 1, 10, 1] """ C = self.code() F = C.base_ring() S = [] col_mults = C.parity_column_multipliers() alphas = C.evaluation_points() for l in range(C.minimum_distance() - 1): Sl = F.zero() for j in range(C.length()): Sl += r[j] * col_mults[j] * (alphas[j] ** l) S.append(Sl) return S def _forney_formula(self, error_evaluator, error_locator): r""" Return the error vector computed through Forney's formula. 
INPUT: - ``error_evaluator``, ``error_locator`` -- two polynomials OUTPUT: - a vector EXAMPLES:: sage: F = GF(11) sage: n, k = 10, 5 sage: C = codes.GeneralizedReedSolomonCode(F.list()[1:n+1], k) sage: D = codes.decoders.GRSKeyEquationSyndromeDecoder(C) sage: R.<x> = F[] sage: evaluator, locator = R(10), R([10, 10]) sage: D._forney_formula(evaluator, locator) (0, 0, 0, 0, 0, 0, 0, 0, 0, 1) """ C = self.code() alphas = C.evaluation_points() col_mults = C.parity_column_multipliers() ELPp = error_locator.derivative() F = C.base_ring() zero, one = F.zero(), F.one() e = [] for i in range(C.length()): alpha_inv = one/alphas[i] if error_locator(alpha_inv) == zero: e.append(-alphas[i]/col_mults[i] * error_evaluator(alpha_inv)/ELPp(alpha_inv)) else: e.append(zero) return vector(F, e) def decode_to_code(self, r): r""" Correct the errors in ``r`` and returns a codeword. .. NOTE:: If the code associated to ``self`` has the same length as its dimension, ``r`` will be returned as is. INPUT: - ``r`` -- a vector of the ambient space of ``self.code()`` OUTPUT: - a vector of ``self.code()`` EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[1:n+1], k) sage: D = codes.decoders.GRSKeyEquationSyndromeDecoder(C) sage: c = C.random_element() sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), D.decoding_radius()) sage: y = Chan(c) sage: c == D.decode_to_code(y) True TESTS: If one tries to decode a word with too many errors, it returns an exception:: sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), D.decoding_radius()+1) sage: y = Chan(c) sage: D.decode_to_message(y) Traceback (most recent call last): ... DecodingError: Decoding failed because the number of errors exceeded the decoding radius If one tries to decode something which is not in the ambient space of the code, an exception is raised:: sage: D.decode_to_code(42) Traceback (most recent call last): ... ValueError: The word to decode has to be in the ambient space of the code """ C = self.code() if r not in C.ambient_space(): raise ValueError("The word to decode has to be in the ambient space of the code") F = C.base_field() PolRing = C.base_field()['x'] x = PolRing.gen() if C.length() == C.dimension() or r in C: return r S = PolRing(self._syndrome(r)) a = x ** (C.minimum_distance() - 1) (EEP, ELP) = self._partial_xgcd(a, S, PolRing) e = self._forney_formula(EEP, ELP) dec = r - e if dec not in C: raise DecodingError("Decoding failed because the number of errors exceeded the decoding radius") return dec def decode_to_message(self, r): r""" Decode ``r`` to an element in message space of ``self`` .. NOTE:: If the code associated to ``self`` has the same length as its dimension, ``r`` will be unencoded as is. In that case, if ``r`` is not a codeword, the output is unspecified. 
INPUT: - ``r`` -- a codeword of ``self`` OUTPUT: - a vector of ``self`` message space EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[1:n+1], k) sage: D = codes.decoders.GRSKeyEquationSyndromeDecoder(C) sage: c = C.random_element() sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), D.decoding_radius()) sage: y = Chan(c) sage: D.connected_encoder().unencode(c) == D.decode_to_message(y) True """ C = self.code() if C.length() == C.dimension(): return self.connected_encoder().unencode_nocheck(r) return super(GRSKeyEquationSyndromeDecoder, self).decode_to_message(r) def decoding_radius(self): r""" Return maximal number of errors that ``self`` can decode OUTPUT: - the number of errors as an integer EXAMPLES:: sage: F = GF(59) sage: n, k = 40, 12 sage: C = codes.GeneralizedReedSolomonCode(F.list()[1:n+1], k) sage: D = codes.decoders.GRSKeyEquationSyndromeDecoder(C) sage: D.decoding_radius() 14 """ return (self.code().minimum_distance()-1) // 2 # Make an alias to make everyone happy ReedSolomonCode = GeneralizedReedSolomonCode ####################### registration ############################### GeneralizedReedSolomonCode._registered_encoders["EvaluationVector"] = GRSEvaluationVectorEncoder GeneralizedReedSolomonCode._registered_encoders["EvaluationPolynomial"] = GRSEvaluationPolynomialEncoder GeneralizedReedSolomonCode._registered_decoders["BerlekampWelch"] = GRSBerlekampWelchDecoder GRSBerlekampWelchDecoder._decoder_type = {"hard-decision", "always-succeed"} GeneralizedReedSolomonCode._registered_decoders["Gao"] = GRSGaoDecoder GRSGaoDecoder._decoder_type = {"hard-decision", "always-succeed"} GeneralizedReedSolomonCode._registered_decoders["ErrorErasure"] = GRSErrorErasureDecoder GRSErrorErasureDecoder._decoder_type = {"error-erasure", "always-succeed"} GeneralizedReedSolomonCode._registered_decoders["KeyEquationSyndrome"] = GRSKeyEquationSyndromeDecoder GRSKeyEquationSyndromeDecoder._decoder_type = {"hard-decision", "always-succeed"}
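# A minimal round-trip sketch tying the registered decoders above together (a sketch
# only: it recombines calls already exercised by the doctests in this module and must
# be run inside a Sage session):
#
#   sage: C = codes.GeneralizedReedSolomonCode(GF(59).list()[:40], 12)
#   sage: D = C.decoder("Gao")          # or "BerlekampWelch"
#   sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), D.decoding_radius())
#   sage: c = C.random_element()
#   sage: D.decode_to_code(Chan(c)) == c
#   True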
the-stack_106_30798
import pytest
import os
from utils import compare_errors, first_error_only_line

tests_dir = __file__.rpartition('/')[0] + '/semantic/'
tests = [file for file in os.listdir(tests_dir) if file.endswith('.cl')]


@pytest.mark.semantic
@pytest.mark.error
@pytest.mark.run(order=3)
@pytest.mark.parametrize("cool_file", tests)
def test_semantic_errors(compiler_path, cool_file):
    compare_errors(compiler_path, tests_dir + cool_file,
                   tests_dir + cool_file[:-3] + '_error.txt',
                   cmp=first_error_only_line)
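# Typical invocation for just this group of tests (a sketch; the conftest defining the
# `compiler_path` fixture and the pytest-ordering plugin are assumed to be provided by
# the surrounding test suite):
#
#   $ pytest -m "semantic and error"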
the-stack_106_30799
#! /usr/bin/env python3
# -*- coding: utf-8 -*-

from plumbum.cmd import mv, mkdir, rename
from plumbum import local

from typing import Tuple

from common import (
    Colors,
    Config,
    get_cmd_or_die,
    pb,
    setup_logging,
    transpile
)

import argparse
import logging
import multiprocessing
import os
import re
import sys

desc = 'transpile files in compile_commands.json.'
parser = argparse.ArgumentParser(description="Translates tinycc into the repo/rust/src directory")
parser.add_argument('-f', '--filter',
                    default="",
                    help='Filters translated files')

config = Config()
config.add_args(parser)

C2RUST_DIR = config.ROOT_DIR
TCC_REPO = os.path.join(C2RUST_DIR, "examples/tinycc/repo")
TCC_RS = os.path.join(TCC_REPO, "tcc.rs")
COMPILE_COMMANDS = os.path.join(TCC_REPO, "compile_commands.json")
RUST_ROOT_DIR = os.path.join(TCC_REPO, "rust")
RUST_SRC_DIR = os.path.join(RUST_ROOT_DIR, "src")
MAIN_RS = os.path.join(RUST_SRC_DIR, "main.rs")

MAIN_MODS = """\
#![feature(label_break_value)]
extern crate libc;

pub mod i386_asm;
pub mod libtcc;
pub mod tccasm;
pub mod tccelf;
pub mod tccgen;
pub mod tccpp;
pub mod tccrun;
pub mod x86_64_gen;
pub mod x86_64_link;
"""

Retcode = int
StdErr = str
StdOut = str


def move(from_, to) -> Tuple[Retcode, StdOut, StdErr]:
    mv_args = [from_, to]
    return mv[mv_args].run()


def rename_(*args) -> Tuple[Retcode, StdOut, StdErr]:
    return rename[args].run()


def add_mods(path: str):
    with open(path, "r+") as file:
        text = file.read()
        text = re.sub(r"extern crate libc;", MAIN_MODS, text, count=1)
        file.seek(0)
        file.write(text)
        file.truncate()


if __name__ == "__main__":
    setup_logging()
    args = parser.parse_args()

    # Add option to use the debug version of `c2rust`
    config.update_args(args)

    assert os.path.isfile(COMPILE_COMMANDS), "Could not find {}".format(COMPILE_COMMANDS)

    print(Colors.OKBLUE + "Transpiling..." + Colors.NO_COLOR)
    transpile(COMPILE_COMMANDS, emit_build_files=False,
              reorganize_definitions=False,  # TODO
              extra_transpiler_args=["--reduce-type-annotations"])

    # Create the src dir if it doesn't already exist
    mkdir["-p", RUST_SRC_DIR].run()

    # Move and rename TCC.rs to main.rs
    move(TCC_RS, MAIN_RS)

    plumbum_rs_glob = local.path(TCC_REPO) // "*.rs"

    # Move source files to src directory, keeping stderr for the error message below
    retcode, _, stderr = move(plumbum_rs_glob, RUST_SRC_DIR)

    assert retcode != 1, "Could not move translated rs files:\n{}".format(stderr)

    # main.rs needs to know about modules so we add them here
    add_mods(MAIN_RS)

    print(Colors.OKGREEN + "Done!" + Colors.NO_COLOR)
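# A quick way to sanity-check `add_mods` in isolation (a sketch only; the path and file
# contents below are made up):
#
#   with open("/tmp/main_demo.rs", "w") as f:          # hypothetical scratch file
#       f.write("extern crate libc;\nfn main() {}\n")
#   add_mods("/tmp/main_demo.rs")
#   # the file now starts with the MAIN_MODS block (feature gate + pub mod lines),
#   # followed by the untouched remainder: "fn main() {}"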
the-stack_106_30800
import logging import os import click from utoolbox.io import open_dataset from utoolbox.io.dataset.base import SessionDataset, TiledDataset from utoolbox.io.dataset import ZarrDataset from utoolbox.util.log import change_logging_level from prompt_toolkit.shortcuts import message_dialog, radiolist_dialog __all__ = ["export"] logger = logging.getLogger("utoolbox.cli.dataset") @click.group() @click.pass_context def export(ctx): """Export info or partial dataset.""" @export.command() @click.argument("ds_path", metavar="DATASET") @click.argument("csv_path", metavar="OUTPUT") @click.option( "-p", "--precision", type=int, default=4, help="maximum number of the decimal place" ) @click.pass_context def coords(ctx, ds_path, csv_path, precision): """ Export filename-coordinate mapping. \f Args: ds_path (str): path to the dataset csv_path (str): where to dump the CSV output precision (int, optional): maximum number of the decimal place """ show_trace = logger.getEffectiveLevel() <= logging.DEBUG ds = open_dataset(ds_path, show_trace=show_trace) if not isinstance(ds, TiledDataset): raise TypeError("only tiled dataset contains coordinate information") if isinstance(ds, SessionDataset): raise ValueError("session-based dataset cannot cherry pick internal arrays") # reload dataset with alterantive class class DumpFilename(type(ds)): @property def read_func(self): def func(uri, shape, dtype): return uri return func logger.debug("reload with DumpFilename") with change_logging_level(logging.ERROR): ds = DumpFilename.load(ds_path) # iterate over uuid and re-interpret the result logger.info("mapping UUID to actual filename") inventory = ds.inventory.reset_index(name="uuid") filenames = [ds[uuid] if uuid else "" for uuid in inventory["uuid"]] inventory["filename"] = filenames # for multi-file stacks, we explode to expand lists to separate rows inventory = inventory.explode("filename") # drop uuid column inventory.drop("uuid", axis="columns", inplace=True) # extract real world coordinate coords = ds.tile_coords.reset_index() # merge tables index_col_names = [name for name in coords.columns if name.startswith("tile_")] inventory = inventory.merge(coords, how="left", on=index_col_names) # rename columns ax = [k.split("_")[1] for k in index_col_names] inventory.rename( columns={k: f"i{v}" for k, v in zip(index_col_names, ax)}, inplace=True ) inventory.rename(columns={f"{k}_coord": k for k in ax}, inplace=True) inventory.to_csv( csv_path, sep=",", index=False, # ignore row number header=True, # we need column headers float_format=f"%.{precision}f", # 4 digit decimals ) @export.command() @click.argument("ds_path", metavar="DATASET") @click.option("-l", "--level", type=int, default=0, help="resolution level to export") @click.pass_context def label(ctx, ds_path, level): """ Export specific label from a ZarrDataset. 
    \f
    Args:
        ds_path (str): path to the dataset
        level (int, optional): resolution level to export
    """
    show_trace = logger.getEffectiveLevel() <= logging.DEBUG
    ds = open_dataset(ds_path, show_trace=show_trace)
    if not isinstance(ds, ZarrDataset):
        raise TypeError("input is not a ZarrDataset")

    labels = ds.labels
    if len(labels) < 2:
        if len(labels) == 1:
            desc = "Only a single label exists in this dataset.\n"
        else:
            desc = "Cannot find a valid label (something very wrong?)\n"
        message_dialog(title="Unable to export label", text=desc).run()
        return

    values = [(label,) * 2 for label in labels]
    label = radiolist_dialog(
        title="Found multiple labels",
        text="Which label would you like to export?",
        values=values,
    ).run()
    if label is None:
        logger.info("cancelled")
        return

    # reload dataset
    logger.info(f'reload dataset with default label "{label}"')
    ds = open_dataset(ds_path, label=label, show_trace=show_trace)

    # TODO camera -> color, nested folder, tif
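# Rough command-line usage (a sketch: the console-script name that mounts this click
# group is assumed here and may differ in the actual package):
#
#   $ utoolbox dataset export coords /data/tiled_ds coords.csv --precision 3
#
# The resulting CSV holds one row per file, with the tile indices (ix, iy, ...) and the
# corresponding world coordinates (x, y, ...) next to each filename.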
the-stack_106_30801
from xml.sax.saxutils import escape import sgmllib, time, os, sys, new, urlparse, re from planet import config, feedparser import htmltmpl voids=feedparser._BaseHTMLProcessor.elements_no_end_tag empty=re.compile(r"<((%s)[^>]*)></\2>" % '|'.join(voids)) class stripHtml(sgmllib.SGMLParser): "remove all tags from the data" def __init__(self, data): sgmllib.SGMLParser.__init__(self) self.result='' if isinstance(data, str): try: self.feed(data.decode('utf-8')) except: self.feed(data) else: self.feed(data) self.close() def __str__(self): if isinstance(self.result, unicode): return self.result.encode('utf-8') return self.result def handle_entityref(self, ref): import htmlentitydefs if ref in htmlentitydefs.entitydefs: ref=htmlentitydefs.entitydefs[ref] if len(ref)==1: self.result+=unichr(ord(ref)) elif ref.startswith('&#') and ref.endswith(';'): self.handle_charref(ref[2:-1]) else: self.result+='&%s;' % ref else: self.result+='&%s;' % ref def handle_charref(self, ref): try: if ref.startswith('x'): self.result+=unichr(int(ref[1:],16)) else: self.result+=unichr(int(ref)) except: self.result+='&#%s;' % ref def handle_data(self, data): if data: self.result+=data # Data format mappers def String(value): if isinstance(value, unicode): return value.encode('utf-8') return value def Plain(value): return str(stripHtml(value)) def PlanetDate(value): return time.strftime(config.date_format(), value) def NewDate(value): return time.strftime(config.new_date_format(), value) def Rfc822(value): return time.strftime("%a, %d %b %Y %H:%M:%S +0000", value) def Rfc3399(value): return time.strftime("%Y-%m-%dT%H:%M:%S+00:00", value) # Map from FeedParser path to Planet tmpl names Base = [ ['author', String, 'author'], ['author_name', String, 'author_detail', 'name'], ['generator', String, 'generator'], ['id', String, 'id'], ['icon', String, 'icon'], ['last_updated_822', Rfc822, 'updated_parsed'], ['last_updated_iso', Rfc3399, 'updated_parsed'], ['last_updated', PlanetDate, 'updated_parsed'], ['link', String, 'link'], ['logo', String, 'logo'], ['rights', String, 'rights_detail', 'value'], ['subtitle', String, 'subtitle_detail', 'value'], ['title', String, 'title_detail', 'value'], ['title_plain', Plain, 'title_detail', 'value'], ['url', String, 'links', {'rel':'self'}, 'href'], ['url', String, 'planet_http_location'], ] Items = [ ['author', String, 'author'], ['author_email', String, 'author_detail', 'email'], ['author_name', String, 'author_detail', 'name'], ['author_uri', String, 'author_detail', 'href'], ['content_language', String, 'content', 0, 'language'], ['content', String, 'summary_detail', 'value'], ['content', String, 'content', 0, 'value'], ['date', PlanetDate, 'published_parsed'], ['date', PlanetDate, 'updated_parsed'], ['date_822', Rfc822, 'published_parsed'], ['date_822', Rfc822, 'updated_parsed'], ['date_iso', Rfc3399, 'published_parsed'], ['date_iso', Rfc3399, 'updated_parsed'], ['enclosure_href', String, 'links', {'rel': 'enclosure'}, 'href'], ['enclosure_length', String, 'links', {'rel': 'enclosure'}, 'length'], ['enclosure_type', String, 'links', {'rel': 'enclosure'}, 'type'], ['id', String, 'id'], ['link', String, 'links', {'rel': 'alternate'}, 'href'], ['new_channel', String, 'source', 'id'], ['new_date', NewDate, 'published_parsed'], ['new_date', NewDate, 'updated_parsed'], ['rights', String, 'rights_detail', 'value'], ['title_language', String, 'title_detail', 'language'], ['title_plain', Plain, 'title_detail', 'value'], ['title', String, 'title_detail', 'value'], ['summary_language', String, 
'summary_detail', 'language'], ['updated', PlanetDate, 'updated_parsed'], ['updated_822', Rfc822, 'updated_parsed'], ['updated_iso', Rfc3399, 'updated_parsed'], ['published', PlanetDate, 'published_parsed'], ['published_822', Rfc822, 'published_parsed'], ['published_iso', Rfc3399, 'published_parsed'], ] # Add additional rules for source information for rule in Base: Items.append(['channel_'+rule[0], rule[1], 'source'] + rule[2:]) def tmpl_mapper(source, rules): "Apply specified rules to the source, and return a template dictionary" output = {} for rule in rules: node = source for path in rule[2:]: if isinstance(path, str) and path in node: if path == 'value': if node.get('type','')=='text/plain': node['value'] = escape(node['value']) node['type'] = 'text/html' elif node.get('type','')=='application/xhtml+xml': node['value'] = empty.sub(r"<\1 />", node['value']) node = node[path] elif isinstance(path, int): node = node[path] elif isinstance(path, dict): for test in node: for key, value in path.items(): if test.get(key,None) != value: break else: node = test break else: break else: break else: if node: output[rule[0]] = rule[1](node) # copy over all planet namespaced elements from parent source for name,value in source.items(): if name.startswith('planet_'): output[name[7:]] = String(value) if not output.get('name') and source.has_key('title_detail'): output['name'] = Plain(source.title_detail.value) # copy over all planet namespaced elements from child source element if 'source' in source: for name,value in source.source.items(): if name.startswith('planet_'): output['channel_' + name[7:]] = String(value) if not output.get('channel_name') and \ source.source.has_key('title_detail'): output['channel_name'] = Plain(source.source.title_detail.value) return output def _end_planet_source(self): self._end_source() context = self._getContext() if not context.has_key('sources'): context['sources'] = [] context.sources.append(context.source) del context['source'] def template_info(source): """ get template information from a feedparser output """ # wire in support for planet:source, call feedparser, unplug planet:source mixin=feedparser._FeedParserMixin mixin._start_planet_source = mixin._start_source mixin._end_planet_source = \ new.instancemethod(_end_planet_source, None, mixin) data=feedparser.parse(source) del mixin._start_planet_source del mixin._end_planet_source # apply rules to convert feed parser output to htmltmpl input output = {'Channels': [], 'Items': []} output.update(tmpl_mapper(data.feed, Base)) sources = [] for feed in data.feed.get('sources',[]): source = tmpl_mapper(feed, Base) sources.append([source.get('name'), source]) sources.sort() output['Channels'] = [source for name,source in sources] for entry in data.entries: output['Items'].append(tmpl_mapper(entry, Items)) # synthesize isPermaLink attribute for item in output['Items']: if item.get('id') == item.get('link'): item['guid_isPermaLink']='true' else: item['guid_isPermaLink']='false' # feed level information output['generator'] = config.generator_uri() output['name'] = config.name() output['link'] = config.link() output['owner_name'] = config.owner_name() output['owner_email'] = config.owner_email() output['pubsubhubbub_hub'] = config.pubsubhubbub_hub() if config.feed(): output['feed'] = config.feed() output['feedtype'] = config.feed().find('rss')>=0 and 'rss' or 'atom' # date/time information date = time.gmtime() output['date'] = PlanetDate(date) output['date_iso'] = Rfc3399(date) output['date_822'] = Rfc822(date) # remove 
new_dates and new_channels that aren't "new" date = channel = None for item in output['Items']: if item.has_key('new_date'): if item['new_date'] == date: del item['new_date'] else: date = item['new_date'] if item.has_key('new_channel'): if item['new_channel'] == channel and not item.has_key('new_date'): del item['new_channel'] else: channel = item['new_channel'] return output def run(script, doc, output_file=None, options={}): """ process an HTMLTMPL file """ manager = htmltmpl.TemplateManager() template = manager.prepare(script) tp = htmltmpl.TemplateProcessor(html_escape=0) for key,value in template_info(doc).items(): tp.set(key, value) if output_file: basename = os.path.basename(output_file) reluri = os.path.splitext(os.path.basename(output_file))[0] tp.set('url', urlparse.urljoin(config.link(),reluri)) tp.set('fullurl', urlparse.urljoin(config.link(),basename)) output = open(output_file, "w") output.write(tp.process(template)) output.close() else: return tp.process(template) if __name__ == '__main__': sys.path.insert(0, os.path.split(sys.path[0])[0]) for test in sys.argv[1:]: from pprint import pprint pprint(template_info('/home/rubys/bzr/venus/tests/data/filter/tmpl/'+test))
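# A small illustration of how one mapping rule is applied (Python 2, using only this
# module's own helpers; the entry dict is made up):
#
#   >>> tmpl_mapper({'published_parsed': time.gmtime(0)},
#   ...             [['date_822', Rfc822, 'published_parsed']])
#   {'date_822': 'Thu, 01 Jan 1970 00:00:00 +0000'}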
the-stack_106_30802
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

from . import FairseqLRScheduler, register_lr_scheduler


@register_lr_scheduler('linear')
class LinearSchedule(FairseqLRScheduler):
    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if len(args.lr) > 1:
            raise ValueError(
                'Cannot use a fixed learning rate schedule with the linear scheduler.'
                ' Consider --lr-scheduler=fixed instead.'
            )
        max_lr = args.lr[0]
        if args.warmup_init_lr < 0:
            args.warmup_init_lr = max_lr
        self.max_update = args.max_update
        self.lr_step_warmup = (max_lr - args.warmup_init_lr) / args.warmup_updates
        self.lr_step_decay = - (max_lr - args.end_lr) / (args.max_update - args.warmup_updates)
        self.end_lr = args.end_lr

        # initial learning rate
        self.lr = args.warmup_init_lr
        self.optimizer.set_lr(self.lr)

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        parser.add_argument('--warmup-updates', default=4000, type=int, metavar='N',
                            help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
                            help='initial learning rate during warmup phase; default is args.lr')
        parser.add_argument('--end-lr', default=0.0, type=float, metavar='LR',
                            help='final learning rate; default is 0')

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        # we don't change the learning rate at epoch boundaries
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        if num_updates < self.args.warmup_updates:
            self.lr = self.args.warmup_init_lr + num_updates * self.lr_step_warmup
        else:
            self.lr = (num_updates - self.max_update) * self.lr_step_decay + self.end_lr
        self.optimizer.set_lr(self.lr)
        return self.lr
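# Worked example of the resulting schedule (flag values chosen purely for illustration):
# with --lr 0.001 --warmup-init-lr 0 --warmup-updates 1000 --end-lr 0 --max-update 11000
#
#   lr_step_warmup = (0.001 - 0) / 1000            = 1e-6  per update
#   lr_step_decay  = -(0.001 - 0) / (11000 - 1000) = -1e-7 per update
#
#   update     0  ->  lr = 0.0
#   update   500  ->  lr = 0 + 500 * 1e-6              = 0.0005
#   update  1000  ->  lr = (1000 - 11000) * -1e-7 + 0  = 0.001   (peak)
#   update  6000  ->  lr = (6000 - 11000) * -1e-7 + 0  = 0.0005
#   update 11000  ->  lr = end_lr                      = 0.0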
the-stack_106_30805
# Copyright (c) 2014, Clemson University # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the {organization} nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from django.contrib import admin from django_sshkey.models import UserKey from salmonella.admin import SalmonellaMixin class UserKeyAdmin(SalmonellaMixin, admin.ModelAdmin): list_display = [ '__unicode__', 'user', 'name', 'fingerprint', 'created', 'last_modified', 'last_used', ] search_fields = [ 'user__username', ] readonly_fields = [ 'fingerprint', 'created', 'last_modified', 'last_used', ] salmonella_fields = ('user',) admin.site.register(UserKey, UserKeyAdmin)
the-stack_106_30808
from honeygrove import log from honeygrove.config import Config from honeygrove.core.Credential import Credential from honeygrove.core.FilesystemParser import FilesystemParser from honeygrove.core.HoneytokenDatabase import HoneytokenDatabase from honeygrove.services.ServiceBaseModel import Limiter, ServiceBaseModel from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization from twisted.conch import avatar, error, insults, interfaces, recvline from twisted.conch.ssh import factory, keys, session, userauth, common, transport from twisted.cred.portal import Portal from twisted.internet import defer from twisted.python import components from datetime import datetime, timedelta import json import os from os.path import expanduser, exists, dirname from random import randint import re import subprocess import time from urllib import request transport.SSHTransportBase.ourVersionString = Config.ssh.banner lastLoginTime = dict() def load_database(): global lastLoginTime try: with open(str(Config.ssh.database_path), 'r') as fp: lastLoginTime = json.loads(fp.read()) except FileNotFoundError: pass except Exception: # e.g. damaged json encoding log.err("Failed to load lastLoginTime from existing file \""+str(Config.ssh.database_path)+"\"") def save_database(): try: with open(str(Config.ssh.database_path), 'w') as fp: fp.write(json.dumps(lastLoginTime)) except Exception: # e.g. insufficient write permissions, io error etc. log.err("Failed to write lastLoginTime to file \""+str(Config.ssh.database_path)+"\"") class SSHService(ServiceBaseModel): honeytokendb = HoneytokenDatabase(servicename=Config.ssh.name) def __init__(self): super(SSHService, self).__init__() self._name = Config.ssh.name self._port = Config.ssh.port # Create a custom portal with the honeytoken database as credential backend p = Portal(SSHRealm()) p.registerChecker(self.honeytokendb) self._fService = factory.SSHFactory() self._fService.services[b'ssh-userauth'] = groveUserAuth self._limiter = Limiter(self._fService, Config.ssh.name, Config.ssh.connections_per_host) self._fService.portal = p # self.protocol = SSHProtocol # self._fService.protocol = self.protocol home = expanduser('~') # XXX: These paths should be configurable privateKeyPath = home + '/.ssh/id_honeygrove' publicKeyPath = home + '/.ssh/id_honeygrove.pub' # Generate RSA keys if they don't exist if not (exists(privateKeyPath) and exists(publicKeyPath)): key = keys.rsa.generate_private_key(public_exponent=65537, key_size=4096, backend=default_backend()) private_key = key.private_bytes(serialization.Encoding.PEM, serialization.PrivateFormat.TraditionalOpenSSL, serialization.NoEncryption()) public_key = key.public_key().public_bytes(serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH) # make .ssh directory, if it doesn't exist os.makedirs(dirname(publicKeyPath), exist_ok=True) with open(privateKeyPath, 'w') as f: f.write(private_key.decode()) with open(publicKeyPath, 'w') as f: f.write(public_key.decode()) self._fService.privateKeys = {b'ssh-rsa': keys.Key.fromFile(privateKeyPath)} self._fService.publicKeys = {b'ssh-rsa': keys.Key.fromFile(publicKeyPath)} class SSHProtocol(recvline.HistoricRecvLine): def connectionMade(self): """ Initializes the session """ super(SSHProtocol, self).connectionMade() # Service related self.service_name = Config.ssh.name self.local_ip = Config.general.address self.local_port = Config.ssh.port self.log = log # Connection related self.user = self.terminal.transport.session.avatar 
self.remote = self.user.conn.transport.transport.client self._parser = FilesystemParser(Config.folder.filesystem) self.current_dir = expanduser("~") load = self.loadLoginTime(self.user.username) if not load: # Random, plausible last login time tdelta = timedelta(days=randint(1, 365), seconds=randint(0, 60), minutes=randint(0, 60), hours=randint(0, 24)) now = datetime.now() login = now - tdelta loginStr = str(login.ctime()) else: loginStr = load self.saveLoginTime(self.user.username) self.terminal.write("Last login: " + loginStr) self.terminal.nextLine() self.showPrompt() def saveLoginTime(self, username): global lastLoginTime # limits number of saved "user profiles" to keep an attacker from filling the memory if len(lastLoginTime) <= 10000: if Config.general.use_utc: lastLoginTime[username] = str(datetime.utcnow().ctime()) else: lastLoginTime[username] = str(datetime.now().ctime()) def loadLoginTime(self, username): if username in lastLoginTime: return lastLoginTime[username] else: return False def print(self, lines, log=None): """ Prints a response to the client's terminal :param lines: a line or list of lines to be printed :param log: string that will appear in the log file """ if not isinstance(lines, list): # if only a single line should be printed lines = [lines] for line in lines: self.terminal.write(line) self.terminal.nextLine() if not log: log = lines self.log.response(self.service_name, self.remote[0], self.remote[1], self.local_ip, self.local_port, log, self.user.username) def showPrompt(self): """ Show prompt at start of line. """ self.terminal.write(self.user.username + "@" + Config.general.hostname + ":" + self._parser.get_formatted_path() + "$ ") def getCommandFunc(self, cmd): """ Get the corresponding function to a command. :param cmd: the command to search for :return: the corresponding "ssh_" function """ return getattr(self, 'ssh_' + cmd, None) def get_help(self, cmd): """ Get the helptext for a command :param cmd: :return: the corresponding text """ helptext = "" append = False with open(str(Config.ssh.helptext_folder)) as helps: for line in helps: if append and re.match("^\S", line): return helptext if re.match("^" + cmd, line): append = True if append: helptext = helptext + line return helptext def handle_arguments(self, args): """ Split arguments in path and list of arguments :param args: arguments :return: path, arguments """ path = "" arguments = [] for arg in args: if not arg.startswith("-"): path = arg else: for char in arg: if char != "-": arguments.append(char) return path, arguments def lineReceived(self, line): """ What to do, when we receive input. Also handles real command execution. 
:param line: the line received """ line = line.strip() if line: line = line.decode() # log call, we received a request self.log.request(self.service_name, self.remote[0], self.remote[1], self.local_ip, self.local_port, line, self.user.username) res = None if Config.ssh.real_shell: # Forwarding commands to the real shell if "cd" in line: try: self.current_dir = subprocess.check_output(line + " && pwd", stderr=subprocess.STDOUT, shell=True, cwd=self.current_dir).decode().strip() except subprocess.CalledProcessError as e: res = e.output res = res.decode() res = res.split("\n")[:-1] if "exit" in line: self.ssh_exit() else: try: res = subprocess.check_output(line, stderr=subprocess.STDOUT, shell=True, cwd=self.current_dir) except subprocess.CalledProcessError as e: res = e.output res = res.decode() res = res.split("\n")[:-1] else: # faking an ssh session cmdAndArgs = line.split() cmd = cmdAndArgs[0] args = cmdAndArgs[1:] func = self.getCommandFunc(cmd) if func: try: res = func(*args) except Exception as e: self.log.err(str(e)) else: res = cmd + ": command not found" if res: if not isinstance(res, tuple): # If only response and no log text res = (res, "") self.print(*res) self.showPrompt() def ssh_help(self, cmd=''): """ Prints the GNU bash help for cmd or the universal help text if cmd is not given :param cmd: the command """ if cmd: func = self.getCommandFunc(cmd) if func: text = self.get_help(cmd) if not func or not text: text = "help: no help topics match `{}'. " \ "Try `help help' or `man -k {}' or `info {}'.".format(cmd, cmd, cmd) return text gnuhelp = [] with open(str(Config.ssh.gnuhelp_folder)) as file: for line in file: gnuhelp.append(line) return gnuhelp, "Help text" def ssh_pwd(self): """ Prints the path to the current directory in fake filesystem """ return self._parser.get_current_path() def ssh_echo(self, *args): """ Prints whatever is in args """ return " ".join(args) def ssh_whoami(self): """ prints the username """ return self.user.username def ssh_exit(self): """ close the connection """ self.terminal.nextLine() self.terminal.loseConnection() def ssh_clear(self): """clear the terminal""" self.terminal.reset() def ssh_cd(self, *args): """ Change directory in fake filesystem :param args: arguments and path """ res = None if args: res = self._parser.cd(args[-1]) return res def ssh_ls(self, *args): """ Lists the content of the given directory in faked filesystem or of the current one if no path is given in args :param args: arguments and path """ path, arguments = self.handle_arguments(args) try: lines = self._parser.ls(path).split("\n")[:-1] # Split puts an empty string after the last "/n" except Exception: return "ls: " + path + ": No such file or directory." for line in lines: if line and line[0] == "." 
and "a" not in arguments: lines.remove(line) lines = lines return lines, "ls Text" def ssh_mkdir(self, *args): """ Creates a directory in the fake filesystem :param args: path to be created :return: """ return self._parser.mkdir(args[-1]) def ssh_touch(self, *args): """ Creates a file in the fake filesystem :param args: path to the new file """ return self._parser.touch(args[-1]) def ssh_rm(self, *args): """ removes whatever is at the specified path :param args: arguments and path :return: """ path, arguments = self.handle_arguments(args) if "r" in arguments and "f" in arguments and path == "/": time.sleep(4) self.ssh_exit() # r e a l i s m return if self._parser.valid_directory(path) and "r" not in arguments: return "rm: " + args[-1] + ": is a directory" return self._parser.delete(path) def ssh_mv(self, *args): """ Moves an element in the fake filesystem :param args: arguments, original path, new path """ res = self._parser.move(args[-2], args[-1]) if res: return "mv: " + res def ssh_cat(self, *args): """ Display the content of a file :param args: filepath """ try: response = self._parser.cat(args[-1]) except Exception as e: if str(e) == "File not found": response = "cat: " + args[-1] + ": File or directory not found" if str(e) == "Is a directory": response = "cat: " + args[-1] + ": Is a directory" return response def ssh_wget(self, *args): """ Downloads a file from the internet :param args: url """ # Handle URL url = args[-1] filename = url.split('/')[-1].split('#')[0].split('?')[0] if not re.match(r"^https?://", url): url = "http://" + url if not re.match(r"^https?://.*\..*/", url): # wenn die URL nichts hinter dem "/" nach der TLD hat filename = "index.html" # Determine filename i = 1 while filename in os.listdir(Config.folder.quarantine): filename = filename + "." + str(i) i += 1 # Write to disk filepath = "" if Config.ssh.accept_files: request.urlretrieve(url, Config.folder.quarantine / filename) filepath = Config.folder.quarantine / filename self.log.file(self.name, self.userIP, filename, filepath, self.user.username) def ssh_ll(self, *args): """ Alias for ls -l :param args: arguments """ return self.ssh_ls(*args + ("-l",)) class SSHSession(session.SSHSession): local_ip = Config.general.address local_port = Config.ssh.port def openShell(self, transport): """ wire the protocol to the transport channel :param transport: """ serverProtocol = insults.insults.ServerProtocol( SSHProtocol) # neues ServerProtocol mit SSHProtocol als Terminal serverProtocol.makeConnection(transport) transport.makeConnection(session.wrapProtocol(serverProtocol)) remote = transport.session.avatar.conn.transport.transport.client log.request("SSH", remote[0], remote[1], self.local_ip, self.local_port, "<open shell>", transport.session.avatar.username) def getPty(self, terminal, windowSize, attrs): """ Ignore Pty requests :param terminal: :param windowSize: :param attrs: :return: """ pass def execCommand(self, pp, cmd): """ Gets called when the client requests command execution (eg. with a pipe) We don't support command execution but we still want to log the command (e.g. 
for forensics) :param pp: the transport protocol :param cmd: the command the client wants executed """ remote = pp.session.conn.transport.transport.client log.request("SSH", remote[0], remote[1], self.local_ip, self.local_port, "<exec '{}'>".format(cmd.decode()), pp.session.avatar.username) pp.session.conn.transport.sendDisconnect(7, b"Command Execution is not supported.") def windowChanged(self, *args): """ This method would be used to determine the window size of the client terminal. """ pass class SSHRealm(SSHSession): def requestAvatar(avatarId, mind, *interfaces): """ Return the Avatar Object :param avatarId: specifies the service (e.g. session, userauth) :param mind: username :param interfaces: :return: """ return interfaces[0], SSHAvatar(username=mind, service=avatarId), lambda: None class SSHAvatar(avatar.ConchUser): def __init__(self, username, service): super(SSHAvatar, self).__init__() self.username = username self.channelLookup.update({b'session': session.SSHSession}) def lookupChannel(self, channelType, windowSize, maxPacket, data): klass = self.channelLookup.get(channelType, None) if not klass: log.err("Channel {} requested but not found!".format(channelType.decode())) else: return klass(remoteWindow=windowSize, remoteMaxPacket=maxPacket, data=data, avatar=self) class groveUserAuth(userauth.SSHUserAuthServer): def _decode(self, value, title): try: return value.decode() except UnicodeError: # value is invalid utf-8 log.info('{} was invalid UTF-8: "{}"'.format(title, value)) return value.decode('replace') def auth_password(self, ip, username, password): c = Credential(ip, username, password) return self.portal.login(c, None, interfaces.IConchUser).addErrback(self._ebPassword) def tryAuth(self, auth_type, ip, username, secret): auth_type = self._decode(auth_type, "Auth type") auth_type = auth_type.replace('-', '_') f = getattr(self, 'auth_%s' % (auth_type,), None) if f: ret = f(ip, username, secret) if not ret: return defer.fail( error.ConchError('%s return None instead of a Deferred' % (auth_type, ))) else: return ret return defer.fail(error.ConchError('bad auth type: %s' % (auth_type,))) def ssh_USERAUTH_REQUEST(self, packet): """ Base taken from twisted and modified to track login attempts """ # Parse login packet user, next_service, auth_type, secret = common.getNS(packet, 3) # Decode username and secret user = self._decode(user, "User") secret = self._decode(secret[5:], "Secret") # Store remote information for later remote_ip, remote_port = self.transport.transport.client # Check we are in the correct session? FIXME: figure out why eaxctly we need this if user != self.user or next_service != self.nextService: self.authenticatedWith = [] # clear auth state # Store some state for twisted internals self.user = user self.nextService = next_service self.method = auth_type # Start point for deferred d = self.tryAuth(auth_type, remote_ip, user, secret) # Currently we only care about password authentication (and only log the rest) if auth_type != b'password': if auth_type == b'publickey': # Extract key from `secret` # TODO: decode key and log it algorithm, secret, blobrest = common.getNS(secret[1:], 2) d.addCallback(self._cbFinishedAuth) d.addErrback(log.defer_login, Config.ssh.name, Config.ssh.port, remote_ip, remote_port, auth_type, False, user, secret) d.addErrback(self._ebMaybeBadAuth) d.addErrback(self._ebBadAuth) return d # Do we know a honeytoken for this credential pair? 
honeytoken = SSHService.honeytokendb.try_get_token(user, secret) # Callbacks and Errbacks # # If the login suceeds (via HoneytokenDatabase) then we expect to find a honeytoken above # and we pass it to the Callback. If the login does not succeed, than we should not find a # honeytoken and thus we do not pass `None` to the Errback. d.addCallback(self._cbFinishedAuth) d.addCallback(log.defer_login, Config.ssh.name, Config.ssh.port, remote_ip, remote_port, auth_type, True, user, secret, honeytoken) d.addErrback(log.defer_login, Config.ssh.name, Config.ssh.port, remote_ip, remote_port, auth_type, False, user, secret) d.addErrback(self._ebMaybeBadAuth) d.addErrback(self._ebBadAuth) return d # SSHAvatar created by SSHSession implement ISession components.registerAdapter(SSHSession, SSHAvatar, session.ISession) if __name__ == '__main__': service = SSHService() service.startService()
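A standalone sketch of the filename derivation used by ssh_wget in the honeypot above; the function name and sample URLs are illustrative and not part of the original service.

import re

def derive_wget_filename(url):
    # Same rules as ssh_wget: last path segment without fragment/query,
    # falling back to index.html when the URL has nothing after the host.
    filename = url.split('/')[-1].split('#')[0].split('?')[0]
    if not re.match(r"^https?://", url):
        url = "http://" + url
    if not re.match(r"^https?://.*\..*/", url):
        filename = "index.html"
    return filename

print(derive_wget_filename("http://example.com/payload.sh?v=1"))  # payload.sh
print(derive_wget_filename("example.com"))                        # index.html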
the-stack_106_30809
import numpy as np import pytest import pandas as pd from pandas import ( Categorical, MultiIndex, Series, ) import pandas._testing as tm class TestSeriesCount: def test_count_level_series(self): index = MultiIndex( levels=[["foo", "bar", "baz"], ["one", "two", "three", "four"]], codes=[[0, 0, 0, 2, 2], [2, 0, 1, 1, 2]], ) ser = Series(np.random.randn(len(index)), index=index) with tm.assert_produces_warning(FutureWarning): result = ser.count(level=0) expected = ser.groupby(level=0).count() tm.assert_series_equal( result.astype("f8"), expected.reindex(result.index).fillna(0) ) with tm.assert_produces_warning(FutureWarning): result = ser.count(level=1) expected = ser.groupby(level=1).count() tm.assert_series_equal( result.astype("f8"), expected.reindex(result.index).fillna(0) ) def test_count_multiindex(self, series_with_multilevel_index): ser = series_with_multilevel_index series = ser.copy() series.index.names = ["a", "b"] with tm.assert_produces_warning(FutureWarning): result = series.count(level="b") with tm.assert_produces_warning(FutureWarning): expect = ser.count(level=1).rename_axis("b") tm.assert_series_equal(result, expect) with tm.assert_produces_warning(FutureWarning): result = series.count(level="a") with tm.assert_produces_warning(FutureWarning): expect = ser.count(level=0).rename_axis("a") tm.assert_series_equal(result, expect) msg = "Level x not found" with pytest.raises(KeyError, match=msg): with tm.assert_produces_warning(FutureWarning): series.count("x") def test_count_level_without_multiindex(self): ser = Series(range(3)) msg = "Series.count level is only valid with a MultiIndex" with pytest.raises(ValueError, match=msg): with tm.assert_produces_warning(FutureWarning): ser.count(level=1) def test_count(self, datetime_series): assert datetime_series.count() == len(datetime_series) datetime_series[::2] = np.NaN assert datetime_series.count() == np.isfinite(datetime_series).sum() mi = MultiIndex.from_arrays([list("aabbcc"), [1, 2, 2, np.nan, 1, 2]]) ts = Series(np.arange(len(mi)), index=mi) with tm.assert_produces_warning(FutureWarning): left = ts.count(level=1) right = Series([2, 3, 1], index=[1, 2, np.nan]) tm.assert_series_equal(left, right) ts.iloc[[0, 3, 5]] = np.nan with tm.assert_produces_warning(FutureWarning): tm.assert_series_equal(ts.count(level=1), right - 1) # GH#29478 with pd.option_context("use_inf_as_na", True): assert Series([pd.Timestamp("1990/1/1")]).count() == 1 def test_count_categorical(self): ser = Series( Categorical( [np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True ) ) result = ser.count() assert result == 2
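A minimal sketch of the equivalence these tests exercise: per-level counting via groupby, which replaces the deprecated Series.count(level=...) path. The index and values are illustrative.

import numpy as np
import pandas as pd

mi = pd.MultiIndex.from_arrays([["a", "a", "b", "b"], [1, 2, 1, 2]])
ser = pd.Series([1.0, np.nan, 3.0, 4.0], index=mi)

# Counts exclude NaN within each first-level group: a -> 1, b -> 2.
print(ser.groupby(level=0).count())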
the-stack_106_30813
# encoding: utf-8 from __future__ import absolute_import, division, print_function, unicode_literals from django.utils.html import strip_tags class Highlighter(object): css_class = 'highlighted' html_tag = 'span' max_length = 200 text_block = '' def __init__(self, query, **kwargs): self.query = query if 'max_length' in kwargs: self.max_length = int(kwargs['max_length']) if 'html_tag' in kwargs: self.html_tag = kwargs['html_tag'] if 'css_class' in kwargs: self.css_class = kwargs['css_class'] self.query_words = set([word.lower() for word in self.query.split() if not word.startswith('-')]) def highlight(self, text_block): self.text_block = strip_tags(text_block) highlight_locations = self.find_highlightable_words() start_offset, end_offset = self.find_window(highlight_locations) return self.render_html(highlight_locations, start_offset, end_offset) def find_highlightable_words(self): # Use a set so we only do this once per unique word. word_positions = {} # Pre-compute the length. end_offset = len(self.text_block) lower_text_block = self.text_block.lower() for word in self.query_words: if not word in word_positions: word_positions[word] = [] start_offset = 0 while start_offset < end_offset: next_offset = lower_text_block.find(word, start_offset, end_offset) # If we get a -1 out of find, it wasn't found. Bomb out and # start the next word. if next_offset == -1: break word_positions[word].append(next_offset) start_offset = next_offset + len(word) return word_positions def find_window(self, highlight_locations): best_start = 0 best_end = self.max_length # First, make sure we have words. if not len(highlight_locations): return (best_start, best_end) words_found = [] # Next, make sure we found any words at all. for word, offset_list in highlight_locations.items(): if len(offset_list): # Add all of the locations to the list. words_found.extend(offset_list) if not len(words_found): return (best_start, best_end) if len(words_found) == 1: return (words_found[0], words_found[0] + self.max_length) # Sort the list so it's in ascending order. words_found = sorted(words_found) # We now have a denormalized list of all positions were a word was # found. We'll iterate through and find the densest window we can by # counting the number of found offsets (-1 to fit in the window). highest_density = 0 if words_found[:-1][0] > self.max_length: best_start = words_found[:-1][0] best_end = best_start + self.max_length for count, start in enumerate(words_found[:-1]): current_density = 1 for end in words_found[count + 1:]: if end - start < self.max_length: current_density += 1 else: current_density = 0 # Only replace if we have a bigger (not equal density) so we # give deference to windows earlier in the document. if current_density > highest_density: best_start = start best_end = start + self.max_length highest_density = current_density return (best_start, best_end) def render_html(self, highlight_locations=None, start_offset=None, end_offset=None): # Start by chopping the block down to the proper window. 
text = self.text_block[start_offset:end_offset] # Invert highlight_locations to a location -> term list term_list = [] for term, locations in highlight_locations.items(): term_list += [(loc - start_offset, term) for loc in locations] loc_to_term = sorted(term_list) # Prepare the highlight template if self.css_class: hl_start = '<%s class="%s">' % (self.html_tag, self.css_class) else: hl_start = '<%s>' % (self.html_tag) hl_end = '</%s>' % self.html_tag # Copy the part from the start of the string to the first match, # and there replace the match with a highlighted version. highlighted_chunk = "" matched_so_far = 0 prev = 0 prev_str = "" for cur, cur_str in loc_to_term: # This can be in a different case than cur_str actual_term = text[cur:cur + len(cur_str)] # Handle incorrect highlight_locations by first checking for the term if actual_term.lower() == cur_str: if cur < prev + len(prev_str): continue highlighted_chunk += text[prev + len(prev_str):cur] + hl_start + actual_term + hl_end prev = cur prev_str = cur_str # Keep track of how far we've copied so far, for the last step matched_so_far = cur + len(actual_term) # Don't forget the chunk after the last term highlighted_chunk += text[matched_so_far:] if start_offset > 0: highlighted_chunk = '...%s' % highlighted_chunk if end_offset < len(self.text_block): highlighted_chunk = '%s...' % highlighted_chunk return highlighted_chunk
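A small usage sketch for the Highlighter defined above; it assumes Django is installed (for strip_tags) and that the class is importable — the module name below is illustrative.

# Module path is an assumption; adjust to wherever the class above lives.
from highlighting import Highlighter

html = "<p>Search engines rank documents and highlight matching terms.</p>"
hl = Highlighter("highlight terms", html_tag="em", css_class="match", max_length=80)

# Tags are stripped first, then each query word is wrapped in <em class="match">.
print(hl.highlight(html))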
the-stack_106_30814
import threading import os import asyncio import logging import re from functools import partial from datetime import datetime from loguru import logger from prompt_toolkit.eventloop.utils import get_event_loop from prompt_toolkit.application import Application from prompt_toolkit.buffer import Buffer from prompt_toolkit.key_binding import KeyBindings from prompt_toolkit.layout.containers import HSplit, VSplit, Window, WindowAlign from prompt_toolkit.layout.controls import BufferControl, FormattedTextControl from prompt_toolkit.layout.layout import Layout from prompt_toolkit.layout.margins import NumberedMargin, ScrollbarMargin from prompt_toolkit.widgets import SearchToolbar, TextArea, Frame, HorizontalLine from prompt_toolkit.styles import Style from prompt_toolkit.key_binding.bindings.focus import focus_next, focus_previous from prompt_toolkit.document import Document from prompt_toolkit.history import FileHistory from prompt_toolkit.clipboard.pyperclip import PyperclipClipboard from prompt_toolkit.application.current import get_app from prompt_toolkit.lexers import Lexer from prompt_toolkit.styles.named_colors import NAMED_COLORS from .nrfjprog import NRFJProg, NRFJProgRTTNoChannels def getTime(): return datetime.now().strftime('%Y.%m.%d %H:%M:%S.%f')[:23] log_level_color_lut = { 'X': NAMED_COLORS['Blue'], 'D': NAMED_COLORS['Magenta'], 'I': NAMED_COLORS['Green'], 'W': NAMED_COLORS['Yellow'], 'E': NAMED_COLORS['Red'], 'dbg': NAMED_COLORS['Magenta'], 'inf': NAMED_COLORS['Green'], 'wrn': NAMED_COLORS['Yellow'], 'err': NAMED_COLORS['Red'], } class LogLexer(Lexer): def __init__(self, patern, colors=log_level_color_lut) -> None: super().__init__() self.patern = patern self.colors = colors def lex_document(self, document): def get_line(lineno): line = document.lines[lineno] g = re.match(self.patern, line) if g: color = self.colors.get(g.group(2), '#ffffff') return [(color, g.group(1)), ('#ffffff', g.group(3))] return [('#ffffff', line)] return get_line class Console: def __init__(self, prog: NRFJProg, history_file, console_file, latency=50): channels = prog.rtt_start() is_old = False if 'Terminal' not in channels: raise Exception('Not found RTT Terminal channel') if len(channels) > 1: if 'Logger' not in channels: raise Exception('Not found RTT Logger channel') elif len(channels) == 1: is_old = True shell_search = SearchToolbar(ignore_case=True, vi_mode=True) shell_window = TextArea( scrollbar=True, line_numbers=True, focusable=True, focus_on_click=True, read_only=True, search_field=shell_search ) self.shell_buffer = shell_window.buffer logger_search = SearchToolbar(ignore_case=True, vi_mode=True) logger_window = TextArea( scrollbar=True, line_numbers=True, focusable=True, focus_on_click=True, read_only=True, search_field=logger_search, lexer=LogLexer(r'^(#.*?\d(?:\.\d+)? <(\w)\>)(.*)' if is_old else r'^(\[.*?\].*?<(\w+)\>)(.*)') ) self.logger_buffer = logger_window.buffer logger.debug(f'history_file: {history_file}') os.makedirs(os.path.dirname(history_file), exist_ok=True) input_history = FileHistory(history_file) search_field = SearchToolbar(ignore_case=True) self.input_field = TextArea( height=1, prompt=">>> ", style="class:input-field", multiline=False, wrap_lines=False, search_field=search_field, history=input_history, focusable=True, focus_on_click=True) def get_titlebar_text(): return [ ("class:title", " HARDWARIO CHESTER Console "), ("class:title", " (Press [Ctrl-Q] to quit.)"), ] root_container = HSplit( [ # The titlebar. 
Window( height=1, content=FormattedTextControl(get_titlebar_text), align=WindowAlign.CENTER, ), VSplit( [ Frame(HSplit( [ shell_window, shell_search, HorizontalLine(), self.input_field, search_field ] ), title="Shell"), Frame(HSplit( [ logger_window, logger_search ] ), title="Log"), ] ) ] ) bindings = KeyBindings() @bindings.add("c-insert", eager=True) # TODO: check @bindings.add("c-c", eager=True) def do_copy(event): if event.app.layout.has_focus(shell_window): data = shell_window.buffer.copy_selection() event.app.clipboard.set_data(data) elif event.app.layout.has_focus(logger_window): data = logger_window.buffer.copy_selection() event.app.clipboard.set_data(data) @bindings.add("c-q", eager=True) def _(event): event.app.exit() bindings.add("tab")(focus_next) bindings.add("s-tab")(focus_previous) self.app = Application( layout=Layout(root_container, focused_element=self.input_field), key_bindings=bindings, mouse_support=True, full_screen=True, enable_page_navigation_bindings=True, clipboard=PyperclipClipboard() ) rtt_read_delay = latency / 1000.0 if is_old: async def task_rtt_read(): while prog.rtt_is_running: with logger.catch(message='task_rtt_read', reraise=True): try: lines = prog.rtt_read('Terminal') except NRFJProgRTTNoChannels: return if lines: shell = '' log = '' for line in lines.splitlines(): if line.startswith('#'): log += line + '\n' console_file.write(getTime() + ' ') console_file.write(line) else: shell += line + '\n' console_file.write(getTime() + ' > ') console_file.write(line) console_file.write('\n') console_file.flush() if shell: shell = shell.replace('\r', '') self.shell_buffer.set_document(Document(self.shell_buffer.text + shell, None), True) if log: log = log.replace('\r', '') self.logger_buffer.set_document(Document(self.logger_buffer.text + log, None), True) await asyncio.sleep(rtt_read_delay) else: channels_up = (('Terminal', self.shell_buffer), ('Logger', self.logger_buffer)) async def task_rtt_read(): while prog.rtt_is_running: for channel, buffer in channels_up: with logger.catch(message='task_rtt_read', reraise=True): try: line = prog.rtt_read(channel) except NRFJProgRTTNoChannels: return if line: # buffer.insert_text(line.replace('\r', '')) for sline in line.splitlines(): console_file.write(getTime() + (' # ' if channel == 'Logger' else ' > ')) console_file.write(sline) console_file.write('\n') console_file.flush() line = line.replace('\r', '') buffer.set_document(Document(buffer.text + line, None), True) await asyncio.sleep(rtt_read_delay) console_file.write(f'{ "*" * 80 }\n') loop = get_event_loop() loop.create_task(task_rtt_read()) def accept(buff): line = f'{buff.text}\n'.replace('\r', '') # self.shell_buffer.insert_text(line) console_file.write(f'{getTime()} < {line}') text = self.shell_buffer.text + line self.shell_buffer.set_document(Document(text, None), True) prog.rtt_write('Terminal', f'{buff.text}\n') return None self.input_field.accept_handler = accept self.app.run() prog.rtt_stop()
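A standalone sketch of the log-line pattern the LogLexer above colours by level; the sample line mimics the RTT logger output this console expects and is purely illustrative.

import re

patern = r'^(\[.*?\].*?<(\w+)\>)(.*)'  # same pattern passed to LogLexer
line = "[00:00:01.234] <inf> app: boot complete"

g = re.match(patern, line)
if g:
    # group(2) selects the level ("inf") used to pick the colour;
    # group(1)/group(3) split the line into coloured prefix and plain tail.
    print(g.group(1), "|", g.group(2), "|", g.group(3))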
the-stack_106_30815
#------------------------------------------------------------------------------
# Copyright (c) 2016, The University of Manchester, UK.
#
# BSD licenced. See LICENCE for details.
#
# Authors: Robert Haines
#------------------------------------------------------------------------------

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

raw_data = pd.DataFrame.from_csv('data/data_in_days.csv', header=None)
counts = raw_data.groupby(2).count()

data = [[counts, '0'], [counts[1:], '1'], [counts[2:], '2']]

for d in data:
    fig, ax = plt.subplots()
    ax.plot(d[0])

    for item in [fig, ax]:
        item.patch.set_visible(False)

    for item in ['top', 'right', 'bottom', 'left']:
        ax.spines[item].set_visible(False)

    ax.set_xlabel('Sustainability (days)')
    ax.set_ylabel('Number of projects')
    ax.set_xlim([0, 2800])

    fig.set_size_inches(6, 3)
    fig.tight_layout()

    name = 'output/plot-' + d[1] + '.pdf'
    plt.savefig(name)
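DataFrame.from_csv was deprecated in pandas 0.21 and removed in 1.0; a roughly equivalent modern load, assuming the same CSV layout, would be:

import pandas as pd

# read_csv with index_col=0 mirrors the old from_csv default of using the
# first column as the index; header=None keeps the positional column labels.
raw_data = pd.read_csv('data/data_in_days.csv', header=None, index_col=0)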
the-stack_106_30822
from django.contrib.contenttypes.fields import GenericRelation from django.core.exceptions import ValidationError from django.core.validators import MaxValueValidator, MinValueValidator from django.db import models from django.urls import reverse from dcim.choices import * from dcim.constants import * from extras.utils import extras_features from netbox.models import PrimaryModel from utilities.querysets import RestrictedQuerySet from utilities.validators import ExclusionValidator from .device_components import CableTermination, PathEndpoint __all__ = ( 'PowerFeed', 'PowerPanel', ) # # Power # @extras_features('custom_fields', 'custom_links', 'export_templates', 'tags', 'webhooks') class PowerPanel(PrimaryModel): """ A distribution point for electrical power; e.g. a data center RPP. """ site = models.ForeignKey( to='Site', verbose_name='็ซ™็‚น', on_delete=models.PROTECT ) location = models.ForeignKey( to='dcim.Location', verbose_name='ๅŒบไฝ', on_delete=models.PROTECT, blank=True, null=True ) name = models.CharField( verbose_name='ๅ็งฐ', max_length=100 ) images = GenericRelation( verbose_name='ๅ›พ็‰‡', to='extras.ImageAttachment' ) objects = RestrictedQuerySet.as_manager() class Meta: ordering = ['site', 'name'] unique_together = ['site', 'name'] def __str__(self): return self.name def get_absolute_url(self): return reverse('dcim:powerpanel', args=[self.pk]) def clean(self): super().clean() # Location must belong to assigned Site if self.location and self.location.site != self.site: raise ValidationError( f"Location {self.location} ({self.location.site}) is in a different site than {self.site}" ) @extras_features('custom_fields', 'custom_links', 'export_templates', 'tags', 'webhooks') class PowerFeed(PrimaryModel, PathEndpoint, CableTermination): """ An electrical circuit delivered from a PowerPanel. 
""" power_panel = models.ForeignKey( to='PowerPanel', verbose_name='็”ตๆบๆฟ', on_delete=models.PROTECT, related_name='powerfeeds' ) rack = models.ForeignKey( to='Rack', verbose_name='ๆœบๆžถ', on_delete=models.PROTECT, blank=True, null=True ) name = models.CharField( verbose_name='ๅ็งฐ', max_length=100 ) status = models.CharField( max_length=50, verbose_name='็Šถๆ€', choices=PowerFeedStatusChoices, default=PowerFeedStatusChoices.STATUS_ACTIVE ) type = models.CharField( max_length=50, verbose_name='็ฑปๅž‹', choices=PowerFeedTypeChoices, default=PowerFeedTypeChoices.TYPE_PRIMARY ) supply = models.CharField( max_length=50, verbose_name='็”ตๆต็ฑปๅž‹', choices=PowerFeedSupplyChoices, default=PowerFeedSupplyChoices.SUPPLY_AC ) phase = models.CharField( max_length=50, verbose_name='็›ธ', choices=PowerFeedPhaseChoices, default=PowerFeedPhaseChoices.PHASE_SINGLE ) voltage = models.SmallIntegerField( verbose_name='็”ตๅŽ‹', default=POWERFEED_VOLTAGE_DEFAULT, validators=[ExclusionValidator([0])] ) amperage = models.PositiveSmallIntegerField( validators=[MinValueValidator(1)], verbose_name='็”ตๆต', default=POWERFEED_AMPERAGE_DEFAULT ) max_utilization = models.PositiveSmallIntegerField( validators=[MinValueValidator(1), MaxValueValidator(100)], verbose_name='ๆœ€ๅคงๅˆฉ็”จ็އ', default=POWERFEED_MAX_UTILIZATION_DEFAULT ) available_power = models.PositiveIntegerField( default=0, verbose_name='ๅฏ็”จ็”ตๆบ', editable=False ) comments = models.TextField( verbose_name='่ฏ„่ฎบ', blank=True ) objects = RestrictedQuerySet.as_manager() clone_fields = [ 'power_panel', 'rack', 'status', 'type', 'mark_connected', 'supply', 'phase', 'voltage', 'amperage', 'max_utilization', 'available_power', ] class Meta: ordering = ['power_panel', 'name'] unique_together = ['power_panel', 'name'] def __str__(self): return self.name def get_absolute_url(self): return reverse('dcim:powerfeed', args=[self.pk]) def clean(self): super().clean() # Rack must belong to same Site as PowerPanel if self.rack and self.rack.site != self.power_panel.site: raise ValidationError("Rack {} ({}) and power panel {} ({}) are in different sites".format( self.rack, self.rack.site, self.power_panel, self.power_panel.site )) # AC voltage cannot be negative if self.voltage < 0 and self.supply == PowerFeedSupplyChoices.SUPPLY_AC: raise ValidationError({ "voltage": "Voltage cannot be negative for AC supply" }) def save(self, *args, **kwargs): # Cache the available_power property on the instance kva = abs(self.voltage) * self.amperage * (self.max_utilization / 100) if self.phase == PowerFeedPhaseChoices.PHASE_3PHASE: self.available_power = round(kva * 1.732) else: self.available_power = round(kva) super().save(*args, **kwargs) @property def parent_object(self): return self.power_panel def get_type_class(self): return PowerFeedTypeChoices.CSS_CLASSES.get(self.type) def get_status_class(self): return PowerFeedStatusChoices.CSS_CLASSES.get(self.status)
the-stack_106_30823
import json

import requests


def list_places():
    response = requests.get('http://places-api:5000/places/')
    places = json.loads(response.text)
    return places


def get_place(id):
    response = requests.get(f'http://places-api:5000/places/{id}')
    place = json.loads(response.text)
    return place


def get_people():
    response = requests.get('http://people-api:5000/people/')
    people = json.loads(response.text)
    return people


def get_people_per_place(people, place_id):
    people_per_place = []
    for person in people:
        if 'placeId' in person.keys() and person['placeId'] == place_id:
            person.pop('placeId')
            people_per_place.append(person)
    return people_per_place


def generate_response(id=None):
    response = {}
    people = get_people()
    if not id:
        items = []
        places = list_places()
        for place in places:
            item = {}
            item.update(place)
            item['people'] = get_people_per_place(people, place['id'])
            items.append(item)
        response['items'] = items
    else:
        response.update(get_place(id))
        response['people'] = get_people_per_place(people, id)
    return response
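A quick usage sketch for the aggregation helpers above; it assumes the places-api and people-api services are reachable at their hard-coded hostnames (e.g. on the same Docker network), and the module name is illustrative.

# Will raise requests.ConnectionError if the backing services are not running.
from aggregator import generate_response

print(generate_response())       # every place, each with its people attached
print(generate_response(id=1))   # a single place by id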
the-stack_106_30824
# # # from __future__ import absolute_import, division, print_function, \ unicode_literals from ipaddress import IPv4Address, IPv6Address from logging import getLogger import re from six import string_types, text_type from ..equality import EqualityTupleMixin from .geo import GeoCodes class Change(object): def __init__(self, existing, new): self.existing = existing self.new = new @property def record(self): 'Returns new if we have one, existing otherwise' return self.new or self.existing def __lt__(self, other): self_record = self.record other_record = other.record return ((self_record.name, self_record._type) < (other_record.name, other_record._type)) class Create(Change): def __init__(self, new): super(Create, self).__init__(None, new) def __repr__(self, leader=''): source = self.new.source.id if self.new.source else '' return 'Create {} ({})'.format(self.new, source) class Update(Change): # Leader is just to allow us to work around heven eating leading whitespace # in our output. When we call this from the Manager.sync plan summary # section we'll pass in a leader, otherwise we'll just let it default and # do nothing def __repr__(self, leader=''): source = self.new.source.id if self.new.source else '' return 'Update\n{leader} {existing} ->\n{leader} {new} ({src})' \ .format(existing=self.existing, new=self.new, leader=leader, src=source) class Delete(Change): def __init__(self, existing): super(Delete, self).__init__(existing, None) def __repr__(self, leader=''): return 'Delete {}'.format(self.existing) class ValidationError(Exception): @classmethod def build_message(cls, fqdn, reasons): return 'Invalid record {}\n - {}'.format(fqdn, '\n - '.join(reasons)) def __init__(self, fqdn, reasons): super(Exception, self).__init__(self.build_message(fqdn, reasons)) self.fqdn = fqdn self.reasons = reasons class Record(EqualityTupleMixin): log = getLogger('Record') @classmethod def new(cls, zone, name, data, source=None, lenient=False): name = text_type(name) fqdn = '{}.{}'.format(name, zone.name) if name else zone.name try: _type = data['type'] except KeyError: raise Exception('Invalid record {}, missing type'.format(fqdn)) try: _class = { 'A': ARecord, 'AAAA': AaaaRecord, 'ALIAS': AliasRecord, 'CAA': CaaRecord, 'CNAME': CnameRecord, 'MX': MxRecord, 'NAPTR': NaptrRecord, 'NS': NsRecord, 'PTR': PtrRecord, 'SPF': SpfRecord, 'SRV': SrvRecord, 'SSHFP': SshfpRecord, 'TXT': TxtRecord, }[_type] except KeyError: raise Exception('Unknown record type: "{}"'.format(_type)) reasons = _class.validate(name, fqdn, data) try: lenient |= data['octodns']['lenient'] except KeyError: pass if reasons: if lenient: cls.log.warn(ValidationError.build_message(fqdn, reasons)) else: raise ValidationError(fqdn, reasons) return _class(zone, name, data, source=source) @classmethod def validate(cls, name, fqdn, data): reasons = [] n = len(fqdn) if n > 253: reasons.append('invalid fqdn, "{}" is too long at {} chars, max ' 'is 253'.format(fqdn, n)) n = len(name) if n > 63: reasons.append('invalid name, "{}" is too long at {} chars, max ' 'is 63'.format(name, n)) try: ttl = int(data['ttl']) if ttl < 0: reasons.append('invalid ttl') except KeyError: reasons.append('missing ttl') try: if data['octodns']['healthcheck']['protocol'] \ not in ('HTTP', 'HTTPS', 'TCP'): reasons.append('invalid healthcheck protocol') except KeyError: pass return reasons def __init__(self, zone, name, data, source=None): self.log.debug('__init__: zone.name=%s, type=%11s, name=%s', zone.name, self.__class__.__name__, name) self.zone = zone # force 
everything lower-case just to be safe self.name = text_type(name).lower() if name else name self.source = source self.ttl = int(data['ttl']) self._octodns = data.get('octodns', {}) def _data(self): return {'ttl': self.ttl} @property def data(self): return self._data() @property def fqdn(self): if self.name: return '{}.{}'.format(self.name, self.zone.name) return self.zone.name @property def ignored(self): return self._octodns.get('ignored', False) @property def excluded(self): return self._octodns.get('excluded', []) @property def included(self): return self._octodns.get('included', []) @property def healthcheck_host(self): healthcheck = self._octodns.get('healthcheck', {}) if healthcheck.get('protocol', None) == 'TCP': return None try: return healthcheck['host'] except KeyError: return self.fqdn[:-1] @property def healthcheck_path(self): healthcheck = self._octodns.get('healthcheck', {}) if healthcheck.get('protocol', None) == 'TCP': return None try: return healthcheck['path'] except KeyError: return '/_dns' @property def healthcheck_protocol(self): try: return self._octodns['healthcheck']['protocol'] except KeyError: return 'HTTPS' @property def healthcheck_port(self): try: return int(self._octodns['healthcheck']['port']) except KeyError: return 443 def changes(self, other, target): # We're assuming we have the same name and type if we're being compared if self.ttl != other.ttl: return Update(self, other) # NOTE: we're using __hash__ and ordering methods that consider Records # equivalent if they have the same name & _type. Values are ignored. This # is useful when computing diffs/changes. def __hash__(self): return '{}:{}'.format(self.name, self._type).__hash__() def _equality_tuple(self): return (self.name, self._type) def __repr__(self): # Make sure this is always overridden raise NotImplementedError('Abstract base class, __repr__ required') class GeoValue(EqualityTupleMixin): geo_re = re.compile(r'^(?P<continent_code>\w\w)(-(?P<country_code>\w\w)' r'(-(?P<subdivision_code>\w\w))?)?$') @classmethod def _validate_geo(cls, code): reasons = [] match = cls.geo_re.match(code) if not match: reasons.append('invalid geo "{}"'.format(code)) return reasons def __init__(self, geo, values): self.code = geo match = self.geo_re.match(geo) self.continent_code = match.group('continent_code') self.country_code = match.group('country_code') self.subdivision_code = match.group('subdivision_code') self.values = sorted(values) @property def parents(self): bits = self.code.split('-')[:-1] while bits: yield '-'.join(bits) bits.pop() def _equality_tuple(self): return (self.continent_code, self.country_code, self.subdivision_code, self.values) def __repr__(self): return "'Geo {} {} {} {}'".format(self.continent_code, self.country_code, self.subdivision_code, self.values) class _ValuesMixin(object): @classmethod def validate(cls, name, fqdn, data): reasons = super(_ValuesMixin, cls).validate(name, fqdn, data) values = data.get('values', data.get('value', [])) reasons.extend(cls._value_type.validate(values, cls._type)) return reasons def __init__(self, zone, name, data, source=None): super(_ValuesMixin, self).__init__(zone, name, data, source=source) try: values = data['values'] except KeyError: values = [data['value']] self.values = sorted(self._value_type.process(values)) def changes(self, other, target): if self.values != other.values: return Update(self, other) return super(_ValuesMixin, self).changes(other, target) def _data(self): ret = super(_ValuesMixin, self)._data() if len(self.values) > 1: values = 
[getattr(v, 'data', v) for v in self.values if v] if len(values) > 1: ret['values'] = values elif len(values) == 1: ret['value'] = values[0] elif len(self.values) == 1: v = self.values[0] if v: ret['value'] = getattr(v, 'data', v) return ret def __repr__(self): values = "['{}']".format("', '".join([text_type(v) for v in self.values])) return '<{} {} {}, {}, {}>'.format(self.__class__.__name__, self._type, self.ttl, self.fqdn, values) class _GeoMixin(_ValuesMixin): ''' Adds GeoDNS support to a record. Must be included before `Record`. ''' @classmethod def validate(cls, name, fqdn, data): reasons = super(_GeoMixin, cls).validate(name, fqdn, data) try: geo = dict(data['geo']) for code, values in geo.items(): reasons.extend(GeoValue._validate_geo(code)) reasons.extend(cls._value_type.validate(values, cls._type)) except KeyError: pass return reasons def __init__(self, zone, name, data, *args, **kwargs): super(_GeoMixin, self).__init__(zone, name, data, *args, **kwargs) try: self.geo = dict(data['geo']) except KeyError: self.geo = {} for code, values in self.geo.items(): self.geo[code] = GeoValue(code, values) def _data(self): ret = super(_GeoMixin, self)._data() if self.geo: geo = {} for code, value in self.geo.items(): geo[code] = value.values ret['geo'] = geo return ret def changes(self, other, target): if target.SUPPORTS_GEO: if self.geo != other.geo: return Update(self, other) return super(_GeoMixin, self).changes(other, target) def __repr__(self): if self.geo: return '<{} {} {}, {}, {}, {}>'.format(self.__class__.__name__, self._type, self.ttl, self.fqdn, self.values, self.geo) return super(_GeoMixin, self).__repr__() class _ValueMixin(object): @classmethod def validate(cls, name, fqdn, data): reasons = super(_ValueMixin, cls).validate(name, fqdn, data) reasons.extend(cls._value_type.validate(data.get('value', None), cls._type)) return reasons def __init__(self, zone, name, data, source=None): super(_ValueMixin, self).__init__(zone, name, data, source=source) self.value = self._value_type.process(data['value']) def changes(self, other, target): if self.value != other.value: return Update(self, other) return super(_ValueMixin, self).changes(other, target) def _data(self): ret = super(_ValueMixin, self)._data() if self.value: ret['value'] = getattr(self.value, 'data', self.value) return ret def __repr__(self): return '<{} {} {}, {}, {}>'.format(self.__class__.__name__, self._type, self.ttl, self.fqdn, self.value) class _DynamicPool(object): def __init__(self, _id, data): self._id = _id values = [ { 'value': d['value'], 'weight': d.get('weight', 1), } for d in data['values'] ] values.sort(key=lambda d: d['value']) fallback = data.get('fallback', None) self.data = { 'fallback': fallback if fallback != 'default' else None, 'values': values, } def _data(self): return self.data def __eq__(self, other): if not isinstance(other, _DynamicPool): return False return self.data == other.data def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return '{}'.format(self.data) class _DynamicRule(object): def __init__(self, i, data): self.i = i self.data = {} try: self.data['pool'] = data['pool'] except KeyError: pass try: self.data['geos'] = sorted(data['geos']) except KeyError: pass def _data(self): return self.data def __eq__(self, other): if not isinstance(other, _DynamicRule): return False return self.data == other.data def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return '{}'.format(self.data) class _Dynamic(object): def __init__(self, pools, rules): 
self.pools = pools self.rules = rules def _data(self): pools = {} for _id, pool in self.pools.items(): pools[_id] = pool._data() rules = [] for rule in self.rules: rules.append(rule._data()) return { 'pools': pools, 'rules': rules, } def __eq__(self, other): if not isinstance(other, _Dynamic): return False ret = self.pools == other.pools and self.rules == other.rules return ret def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return '{}, {}'.format(self.pools, self.rules) class _DynamicMixin(object): geo_re = re.compile(r'^(?P<continent_code>\w\w)(-(?P<country_code>\w\w)' r'(-(?P<subdivision_code>\w\w))?)?$') @classmethod def validate(cls, name, fqdn, data): reasons = super(_DynamicMixin, cls).validate(name, fqdn, data) if 'dynamic' not in data: return reasons elif 'geo' in data: reasons.append('"dynamic" record with "geo" content') try: pools = data['dynamic']['pools'] except KeyError: pools = {} pools_exist = set() pools_seen = set() if not isinstance(pools, dict): reasons.append('pools must be a dict') elif not pools: reasons.append('missing pools') else: for _id, pool in sorted(pools.items()): if not isinstance(pool, dict): reasons.append('pool "{}" must be a dict'.format(_id)) continue try: values = pool['values'] except KeyError: reasons.append('pool "{}" is missing values'.format(_id)) continue pools_exist.add(_id) for i, value in enumerate(values): value_num = i + 1 try: weight = value['weight'] weight = int(weight) if weight < 1 or weight > 15: reasons.append('invalid weight "{}" in pool "{}" ' 'value {}'.format(weight, _id, value_num)) except KeyError: pass except ValueError: reasons.append('invalid weight "{}" in pool "{}" ' 'value {}'.format(weight, _id, value_num)) try: value = value['value'] reasons.extend(cls._value_type.validate(value, cls._type)) except KeyError: reasons.append('missing value in pool "{}" ' 'value {}'.format(_id, value_num)) fallback = pool.get('fallback', None) if fallback is not None and fallback not in pools: reasons.append('undefined fallback "{}" for pool "{}"' .format(fallback, _id)) # Check for loops fallback = pools[_id].get('fallback', None) seen = [_id, fallback] while fallback is not None: # See if there's a next fallback fallback = pools.get(fallback, {}).get('fallback', None) if fallback in seen: loop = ' -> '.join(seen) reasons.append('loop in pool fallbacks: {}' .format(loop)) # exit the loop break seen.append(fallback) try: rules = data['dynamic']['rules'] except KeyError: rules = [] if not isinstance(rules, (list, tuple)): reasons.append('rules must be a list') elif not rules: reasons.append('missing rules') else: seen_default = False # TODO: don't allow 'default' as a pool name, reserved for i, rule in enumerate(rules): rule_num = i + 1 try: pool = rule['pool'] except KeyError: reasons.append('rule {} missing pool'.format(rule_num)) continue try: geos = rule['geos'] except KeyError: geos = [] if not isinstance(pool, string_types): reasons.append('rule {} invalid pool "{}"' .format(rule_num, pool)) else: if pool not in pools: reasons.append('rule {} undefined pool "{}"' .format(rule_num, pool)) pools_seen.add(pool) elif pool in pools_seen and geos: reasons.append('rule {} invalid, target pool "{}" ' 'reused'.format(rule_num, pool)) pools_seen.add(pool) if not geos: if seen_default: reasons.append('rule {} duplicate default' .format(rule_num)) seen_default = True if not isinstance(geos, (list, tuple)): reasons.append('rule {} geos must be a list' .format(rule_num)) else: for geo in geos: 
reasons.extend(GeoCodes.validate(geo, 'rule {} ' .format(rule_num))) unused = pools_exist - pools_seen if unused: unused = '", "'.join(sorted(unused)) reasons.append('unused pools: "{}"'.format(unused)) return reasons def __init__(self, zone, name, data, *args, **kwargs): super(_DynamicMixin, self).__init__(zone, name, data, *args, **kwargs) self.dynamic = {} if 'dynamic' not in data: return # pools try: pools = dict(data['dynamic']['pools']) except: pools = {} for _id, pool in sorted(pools.items()): pools[_id] = _DynamicPool(_id, pool) # rules try: rules = list(data['dynamic']['rules']) except: rules = [] parsed = [] for i, rule in enumerate(rules): parsed.append(_DynamicRule(i, rule)) # dynamic self.dynamic = _Dynamic(pools, parsed) def _data(self): ret = super(_DynamicMixin, self)._data() if self.dynamic: ret['dynamic'] = self.dynamic._data() return ret def changes(self, other, target): if target.SUPPORTS_DYNAMIC: if self.dynamic != other.dynamic: return Update(self, other) return super(_DynamicMixin, self).changes(other, target) def __repr__(self): # TODO: improve this whole thing, we need multi-line... if self.dynamic: # TODO: this hack can't going to cut it, as part of said # improvements the value types should deal with serializing their # value try: values = self.values except AttributeError: values = self.value return '<{} {} {}, {}, {}, {}>'.format(self.__class__.__name__, self._type, self.ttl, self.fqdn, values, self.dynamic) return super(_DynamicMixin, self).__repr__() class _IpList(object): @classmethod def validate(cls, data, _type): if not isinstance(data, (list, tuple)): data = (data,) if len(data) == 0: return ['missing value(s)'] reasons = [] for value in data: if value == '': reasons.append('empty value') elif value is None: reasons.append('missing value(s)') else: try: cls._address_type(text_type(value)) except Exception: reasons.append('invalid {} address "{}"' .format(cls._address_name, value)) return reasons @classmethod def process(cls, values): # Translating None into '' so that the list will be sortable in python3 return [v if v is not None else '' for v in values] class Ipv4List(_IpList): _address_name = 'IPv4' _address_type = IPv4Address class Ipv6List(_IpList): _address_name = 'IPv6' _address_type = IPv6Address class _TargetValue(object): @classmethod def validate(cls, data, _type): reasons = [] if data == '': reasons.append('empty value') elif not data: reasons.append('missing value') elif not data.endswith('.'): reasons.append('{} value "{}" missing trailing .' 
.format(_type, data)) return reasons @classmethod def process(self, value): if value: return value.lower() return value class CnameValue(_TargetValue): pass class ARecord(_DynamicMixin, _GeoMixin, Record): _type = 'A' _value_type = Ipv4List class AaaaRecord(_DynamicMixin, _GeoMixin, Record): _type = 'AAAA' _value_type = Ipv6List class AliasValue(_TargetValue): pass class AliasRecord(_ValueMixin, Record): _type = 'ALIAS' _value_type = AliasValue class CaaValue(EqualityTupleMixin): # https://tools.ietf.org/html/rfc6844#page-5 @classmethod def validate(cls, data, _type): if not isinstance(data, (list, tuple)): data = (data,) reasons = [] for value in data: try: flags = int(value.get('flags', 0)) if flags < 0 or flags > 255: reasons.append('invalid flags "{}"'.format(flags)) except ValueError: reasons.append('invalid flags "{}"'.format(value['flags'])) if 'tag' not in value: reasons.append('missing tag') if 'value' not in value: reasons.append('missing value') return reasons @classmethod def process(cls, values): return [CaaValue(v) for v in values] def __init__(self, value): self.flags = int(value.get('flags', 0)) self.tag = value['tag'] self.value = value['value'] @property def data(self): return { 'flags': self.flags, 'tag': self.tag, 'value': self.value, } def _equality_tuple(self): return (self.flags, self.tag, self.value) def __repr__(self): return '{} {} "{}"'.format(self.flags, self.tag, self.value) class CaaRecord(_ValuesMixin, Record): _type = 'CAA' _value_type = CaaValue class CnameRecord(_DynamicMixin, _ValueMixin, Record): _type = 'CNAME' _value_type = CnameValue @classmethod def validate(cls, name, fqdn, data): reasons = [] if name == '': reasons.append('root CNAME not allowed') reasons.extend(super(CnameRecord, cls).validate(name, fqdn, data)) return reasons class MxValue(EqualityTupleMixin): @classmethod def validate(cls, data, _type): if not isinstance(data, (list, tuple)): data = (data,) reasons = [] for value in data: try: try: int(value['preference']) except KeyError: int(value['priority']) except KeyError: reasons.append('missing preference') except ValueError: reasons.append('invalid preference "{}"' .format(value['preference'])) exchange = None try: exchange = value.get('exchange', None) or value['value'] if not exchange.endswith('.'): reasons.append('MX value "{}" missing trailing .' 
.format(exchange)) except KeyError: reasons.append('missing exchange') return reasons @classmethod def process(cls, values): return [MxValue(v) for v in values] def __init__(self, value): # RFC1035 says preference, half the providers use priority try: preference = value['preference'] except KeyError: preference = value['priority'] self.preference = int(preference) # UNTIL 1.0 remove value fallback try: exchange = value['exchange'] except KeyError: exchange = value['value'] self.exchange = exchange.lower() @property def data(self): return { 'preference': self.preference, 'exchange': self.exchange, } def __hash__(self): return hash((self.preference, self.exchange)) def _equality_tuple(self): return (self.preference, self.exchange) def __repr__(self): return "'{} {}'".format(self.preference, self.exchange) class MxRecord(_ValuesMixin, Record): _type = 'MX' _value_type = MxValue class NaptrValue(EqualityTupleMixin): VALID_FLAGS = ('S', 'A', 'U', 'P') @classmethod def validate(cls, data, _type): if not isinstance(data, (list, tuple)): data = (data,) reasons = [] for value in data: try: int(value['order']) except KeyError: reasons.append('missing order') except ValueError: reasons.append('invalid order "{}"'.format(value['order'])) try: int(value['preference']) except KeyError: reasons.append('missing preference') except ValueError: reasons.append('invalid preference "{}"' .format(value['preference'])) try: flags = value['flags'] if flags not in cls.VALID_FLAGS: reasons.append('unrecognized flags "{}"'.format(flags)) except KeyError: reasons.append('missing flags') # TODO: validate these... they're non-trivial for k in ('service', 'regexp', 'replacement'): if k not in value: reasons.append('missing {}'.format(k)) return reasons @classmethod def process(cls, values): return [NaptrValue(v) for v in values] def __init__(self, value): self.order = int(value['order']) self.preference = int(value['preference']) self.flags = value['flags'] self.service = value['service'] self.regexp = value['regexp'] self.replacement = value['replacement'] @property def data(self): return { 'order': self.order, 'preference': self.preference, 'flags': self.flags, 'service': self.service, 'regexp': self.regexp, 'replacement': self.replacement, } def __hash__(self): return hash(self.__repr__()) def _equality_tuple(self): return (self.order, self.preference, self.flags, self.service, self.regexp, self.replacement) def __repr__(self): flags = self.flags if self.flags is not None else '' service = self.service if self.service is not None else '' regexp = self.regexp if self.regexp is not None else '' return "'{} {} \"{}\" \"{}\" \"{}\" {}'" \ .format(self.order, self.preference, flags, service, regexp, self.replacement) class NaptrRecord(_ValuesMixin, Record): _type = 'NAPTR' _value_type = NaptrValue class _NsValue(object): @classmethod def validate(cls, data, _type): if not data: return ['missing value(s)'] elif not isinstance(data, (list, tuple)): data = (data,) reasons = [] for value in data: if not value.endswith('.'): reasons.append('NS value "{}" missing trailing .' 
.format(value)) return reasons @classmethod def process(cls, values): return values class NsRecord(_ValuesMixin, Record): _type = 'NS' _value_type = _NsValue class PtrValue(_TargetValue): pass class PtrRecord(_ValueMixin, Record): _type = 'PTR' _value_type = PtrValue class SshfpValue(EqualityTupleMixin): VALID_ALGORITHMS = (1, 2, 3, 4) VALID_FINGERPRINT_TYPES = (1, 2) @classmethod def validate(cls, data, _type): if not isinstance(data, (list, tuple)): data = (data,) reasons = [] for value in data: try: algorithm = int(value['algorithm']) if algorithm not in cls.VALID_ALGORITHMS: reasons.append('unrecognized algorithm "{}"' .format(algorithm)) except KeyError: reasons.append('missing algorithm') except ValueError: reasons.append('invalid algorithm "{}"' .format(value['algorithm'])) try: fingerprint_type = int(value['fingerprint_type']) if fingerprint_type not in cls.VALID_FINGERPRINT_TYPES: reasons.append('unrecognized fingerprint_type "{}"' .format(fingerprint_type)) except KeyError: reasons.append('missing fingerprint_type') except ValueError: reasons.append('invalid fingerprint_type "{}"' .format(value['fingerprint_type'])) if 'fingerprint' not in value: reasons.append('missing fingerprint') return reasons @classmethod def process(cls, values): return [SshfpValue(v) for v in values] def __init__(self, value): self.algorithm = int(value['algorithm']) self.fingerprint_type = int(value['fingerprint_type']) self.fingerprint = value['fingerprint'] @property def data(self): return { 'algorithm': self.algorithm, 'fingerprint_type': self.fingerprint_type, 'fingerprint': self.fingerprint, } def __hash__(self): return hash(self.__repr__()) def _equality_tuple(self): return (self.algorithm, self.fingerprint_type, self.fingerprint) def __repr__(self): return "'{} {} {}'".format(self.algorithm, self.fingerprint_type, self.fingerprint) class SshfpRecord(_ValuesMixin, Record): _type = 'SSHFP' _value_type = SshfpValue class _ChunkedValuesMixin(_ValuesMixin): CHUNK_SIZE = 255 _unescaped_semicolon_re = re.compile(r'\w;') def chunked_value(self, value): value = value.replace('"', '\\"') vs = [value[i:i + self.CHUNK_SIZE] for i in range(0, len(value), self.CHUNK_SIZE)] vs = '" "'.join(vs) return '"{}"'.format(vs) @property def chunked_values(self): values = [] for v in self.values: values.append(self.chunked_value(v)) return values class _ChunkedValue(object): _unescaped_semicolon_re = re.compile(r'\w;') @classmethod def validate(cls, data, _type): if not data: return ['missing value(s)'] elif not isinstance(data, (list, tuple)): data = (data,) reasons = [] for value in data: if cls._unescaped_semicolon_re.search(value): reasons.append('unescaped ; in "{}"'.format(value)) return reasons @classmethod def process(cls, values): ret = [] for v in values: if v and v[0] == '"': v = v[1:-1] ret.append(v.replace('" "', '')) return ret class SpfRecord(_ChunkedValuesMixin, Record): _type = 'SPF' _value_type = _ChunkedValue class SrvValue(EqualityTupleMixin): @classmethod def validate(cls, data, _type): if not isinstance(data, (list, tuple)): data = (data,) reasons = [] for value in data: # TODO: validate algorithm and fingerprint_type values try: int(value['priority']) except KeyError: reasons.append('missing priority') except ValueError: reasons.append('invalid priority "{}"' .format(value['priority'])) try: int(value['weight']) except KeyError: reasons.append('missing weight') except ValueError: reasons.append('invalid weight "{}"'.format(value['weight'])) try: int(value['port']) except KeyError: 
reasons.append('missing port') except ValueError: reasons.append('invalid port "{}"'.format(value['port'])) try: if not value['target'].endswith('.'): reasons.append('SRV value "{}" missing trailing .' .format(value['target'])) except KeyError: reasons.append('missing target') return reasons @classmethod def process(cls, values): return [SrvValue(v) for v in values] def __init__(self, value): self.priority = int(value['priority']) self.weight = int(value['weight']) self.port = int(value['port']) self.target = value['target'].lower() @property def data(self): return { 'priority': self.priority, 'weight': self.weight, 'port': self.port, 'target': self.target, } def __hash__(self): return hash(self.__repr__()) def _equality_tuple(self): return (self.priority, self.weight, self.port, self.target) def __repr__(self): return "'{} {} {} {}'".format(self.priority, self.weight, self.port, self.target) class SrvRecord(_ValuesMixin, Record): _type = 'SRV' _value_type = SrvValue _name_re = re.compile(r'^_[^\.]+\.[^\.]+') @classmethod def validate(cls, name, fqdn, data): reasons = [] if not cls._name_re.match(name): reasons.append('invalid name for SRV record') reasons.extend(super(SrvRecord, cls).validate(name, fqdn, data)) return reasons class _TxtValue(_ChunkedValue): pass class TxtRecord(_ChunkedValuesMixin, Record): _type = 'TXT' _value_type = _TxtValue
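A standalone sketch of the 255-character chunking that _ChunkedValuesMixin applies to long TXT/SPF values above; the helper name and sample string are illustrative.

CHUNK_SIZE = 255

def chunked_value(value):
    # Same steps as _ChunkedValuesMixin.chunked_value: escape quotes, split
    # into 255-character pieces, emit them as adjacent quoted strings.
    value = value.replace('"', '\\"')
    chunks = [value[i:i + CHUNK_SIZE] for i in range(0, len(value), CHUNK_SIZE)]
    return '"{}"'.format('" "'.join(chunks))

long_txt = 'v=spf1 ' + 'a' * 300  # 307 characters -> two quoted chunks
print(chunked_value(long_txt))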
the-stack_106_30825
""" test with the .transform """ from io import StringIO import numpy as np import pytest from pandas.core.dtypes.common import ensure_platform_int, is_timedelta64_dtype import pandas as pd from pandas import ( Categorical, DataFrame, MultiIndex, Series, Timestamp, concat, date_range, ) import pandas._testing as tm from pandas.core.groupby.groupby import DataError def assert_fp_equal(a, b): assert (np.abs(a - b) < 1e-12).all() def test_transform(): data = Series(np.arange(9) // 3, index=np.arange(9)) index = np.arange(9) np.random.shuffle(index) data = data.reindex(index) grouped = data.groupby(lambda x: x // 3) transformed = grouped.transform(lambda x: x * x.sum()) assert transformed[7] == 12 # GH 8046 # make sure that we preserve the input order df = DataFrame( np.arange(6, dtype="int64").reshape(3, 2), columns=["a", "b"], index=[0, 2, 1] ) key = [0, 0, 1] expected = ( df.sort_index() .groupby(key) .transform(lambda x: x - x.mean()) .groupby(key) .mean() ) result = df.groupby(key).transform(lambda x: x - x.mean()).groupby(key).mean() tm.assert_frame_equal(result, expected) def demean(arr): return arr - arr.mean() people = DataFrame( np.random.randn(5, 5), columns=["a", "b", "c", "d", "e"], index=["Joe", "Steve", "Wes", "Jim", "Travis"], ) key = ["one", "two", "one", "two", "one"] result = people.groupby(key).transform(demean).groupby(key).mean() expected = people.groupby(key).apply(demean).groupby(key).mean() tm.assert_frame_equal(result, expected) # GH 8430 df = tm.makeTimeDataFrame() g = df.groupby(pd.Grouper(freq="M")) g.transform(lambda x: x - 1) # GH 9700 df = DataFrame({"a": range(5, 10), "b": range(5)}) result = df.groupby("a").transform(max) expected = DataFrame({"b": range(5)}) tm.assert_frame_equal(result, expected) def test_transform_fast(): df = DataFrame({"id": np.arange(100000) / 3, "val": np.random.randn(100000)}) grp = df.groupby("id")["val"] values = np.repeat(grp.mean().values, ensure_platform_int(grp.count().values)) expected = Series(values, index=df.index, name="val") result = grp.transform(np.mean) tm.assert_series_equal(result, expected) result = grp.transform("mean") tm.assert_series_equal(result, expected) # GH 12737 df = DataFrame( { "grouping": [0, 1, 1, 3], "f": [1.1, 2.1, 3.1, 4.5], "d": pd.date_range("2014-1-1", "2014-1-4"), "i": [1, 2, 3, 4], }, columns=["grouping", "f", "i", "d"], ) result = df.groupby("grouping").transform("first") dates = [ Timestamp("2014-1-1"), Timestamp("2014-1-2"), Timestamp("2014-1-2"), Timestamp("2014-1-4"), ] expected = DataFrame( {"f": [1.1, 2.1, 2.1, 4.5], "d": dates, "i": [1, 2, 2, 4]}, columns=["f", "i", "d"], ) tm.assert_frame_equal(result, expected) # selection result = df.groupby("grouping")[["f", "i"]].transform("first") expected = expected[["f", "i"]] tm.assert_frame_equal(result, expected) # dup columns df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["g", "a", "a"]) result = df.groupby("g").transform("first") expected = df.drop("g", axis=1) tm.assert_frame_equal(result, expected) def test_transform_broadcast(tsframe, ts): grouped = ts.groupby(lambda x: x.month) result = grouped.transform(np.mean) tm.assert_index_equal(result.index, ts.index) for _, gp in grouped: assert_fp_equal(result.reindex(gp.index), gp.mean()) grouped = tsframe.groupby(lambda x: x.month) result = grouped.transform(np.mean) tm.assert_index_equal(result.index, tsframe.index) for _, gp in grouped: agged = gp.mean() res = result.reindex(gp.index) for col in tsframe: assert_fp_equal(res[col], agged[col]) # group columns grouped = tsframe.groupby({"A": 0, 
"B": 0, "C": 1, "D": 1}, axis=1) result = grouped.transform(np.mean) tm.assert_index_equal(result.index, tsframe.index) tm.assert_index_equal(result.columns, tsframe.columns) for _, gp in grouped: agged = gp.mean(1) res = result.reindex(columns=gp.columns) for idx in gp.index: assert_fp_equal(res.xs(idx), agged[idx]) def test_transform_axis_1(request, transformation_func): # GH 36308 warn = None if transformation_func == "tshift": warn = FutureWarning request.node.add_marker(pytest.mark.xfail(reason="tshift is deprecated")) args = ("ffill",) if transformation_func == "fillna" else () df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}, index=["x", "y"]) with tm.assert_produces_warning(warn): result = df.groupby([0, 0, 1], axis=1).transform(transformation_func, *args) expected = df.T.groupby([0, 0, 1]).transform(transformation_func, *args).T if transformation_func == "diff": # Result contains nans, so transpose coerces to float expected["b"] = expected["b"].astype("int64") # cumcount returns Series; the rest are DataFrame tm.assert_equal(result, expected) def test_transform_axis_ts(tsframe): # make sure that we are setting the axes # correctly when on axis=0 or 1 # in the presence of a non-monotonic indexer # GH12713 base = tsframe.iloc[0:5] r = len(base.index) c = len(base.columns) tso = DataFrame( np.random.randn(r, c), index=base.index, columns=base.columns, dtype="float64" ) # monotonic ts = tso grouped = ts.groupby(lambda x: x.weekday()) result = ts - grouped.transform("mean") expected = grouped.apply(lambda x: x - x.mean()) tm.assert_frame_equal(result, expected) ts = ts.T grouped = ts.groupby(lambda x: x.weekday(), axis=1) result = ts - grouped.transform("mean") expected = grouped.apply(lambda x: (x.T - x.mean(1)).T) tm.assert_frame_equal(result, expected) # non-monotonic ts = tso.iloc[[1, 0] + list(range(2, len(base)))] grouped = ts.groupby(lambda x: x.weekday()) result = ts - grouped.transform("mean") expected = grouped.apply(lambda x: x - x.mean()) tm.assert_frame_equal(result, expected) ts = ts.T grouped = ts.groupby(lambda x: x.weekday(), axis=1) result = ts - grouped.transform("mean") expected = grouped.apply(lambda x: (x.T - x.mean(1)).T) tm.assert_frame_equal(result, expected) def test_transform_dtype(): # GH 9807 # Check transform dtype output is preserved df = DataFrame([[1, 3], [2, 3]]) result = df.groupby(1).transform("mean") expected = DataFrame([[1.5], [1.5]]) tm.assert_frame_equal(result, expected) def test_transform_bug(): # GH 5712 # transforming on a datetime column df = DataFrame({"A": Timestamp("20130101"), "B": np.arange(5)}) result = df.groupby("A")["B"].transform(lambda x: x.rank(ascending=False)) expected = Series(np.arange(5, 0, step=-1), name="B") tm.assert_series_equal(result, expected) def test_transform_numeric_to_boolean(): # GH 16875 # inconsistency in transforming boolean values expected = Series([True, True], name="A") df = DataFrame({"A": [1.1, 2.2], "B": [1, 2]}) result = df.groupby("B").A.transform(lambda x: True) tm.assert_series_equal(result, expected) df = DataFrame({"A": [1, 2], "B": [1, 2]}) result = df.groupby("B").A.transform(lambda x: True) tm.assert_series_equal(result, expected) def test_transform_datetime_to_timedelta(): # GH 15429 # transforming a datetime to timedelta df = DataFrame({"A": Timestamp("20130101"), "B": np.arange(5)}) expected = Series([Timestamp("20130101") - Timestamp("20130101")] * 5, name="A") # this does date math without changing result type in transform base_time = df["A"][0] result = ( 
df.groupby("A")["A"].transform(lambda x: x.max() - x.min() + base_time) - base_time ) tm.assert_series_equal(result, expected) # this does date math and causes the transform to return timedelta result = df.groupby("A")["A"].transform(lambda x: x.max() - x.min()) tm.assert_series_equal(result, expected) def test_transform_datetime_to_numeric(): # GH 10972 # convert dt to float df = DataFrame({"a": 1, "b": date_range("2015-01-01", periods=2, freq="D")}) result = df.groupby("a").b.transform( lambda x: x.dt.dayofweek - x.dt.dayofweek.mean() ) expected = Series([-0.5, 0.5], name="b") tm.assert_series_equal(result, expected) # convert dt to int df = DataFrame({"a": 1, "b": date_range("2015-01-01", periods=2, freq="D")}) result = df.groupby("a").b.transform( lambda x: x.dt.dayofweek - x.dt.dayofweek.min() ) expected = Series([0, 1], name="b") tm.assert_series_equal(result, expected) def test_transform_casting(): # 13046 data = """ idx A ID3 DATETIME 0 B-028 b76cd912ff "2014-10-08 13:43:27" 1 B-054 4a57ed0b02 "2014-10-08 14:26:19" 2 B-076 1a682034f8 "2014-10-08 14:29:01" 3 B-023 b76cd912ff "2014-10-08 18:39:34" 4 B-023 f88g8d7sds "2014-10-08 18:40:18" 5 B-033 b76cd912ff "2014-10-08 18:44:30" 6 B-032 b76cd912ff "2014-10-08 18:46:00" 7 B-037 b76cd912ff "2014-10-08 18:52:15" 8 B-046 db959faf02 "2014-10-08 18:59:59" 9 B-053 b76cd912ff "2014-10-08 19:17:48" 10 B-065 b76cd912ff "2014-10-08 19:21:38" """ df = pd.read_csv( StringIO(data), sep=r"\s+", index_col=[0], parse_dates=["DATETIME"] ) result = df.groupby("ID3")["DATETIME"].transform(lambda x: x.diff()) assert is_timedelta64_dtype(result.dtype) result = df[["ID3", "DATETIME"]].groupby("ID3").transform(lambda x: x.diff()) assert is_timedelta64_dtype(result.DATETIME.dtype) def test_transform_multiple(ts): grouped = ts.groupby([lambda x: x.year, lambda x: x.month]) grouped.transform(lambda x: x * 2) grouped.transform(np.mean) def test_dispatch_transform(tsframe): df = tsframe[::5].reindex(tsframe.index) grouped = df.groupby(lambda x: x.month) filled = grouped.fillna(method="pad") fillit = lambda x: x.fillna(method="pad") expected = df.groupby(lambda x: x.month).transform(fillit) tm.assert_frame_equal(filled, expected) def test_transform_transformation_func(request, transformation_func): # GH 30918 df = DataFrame( { "A": ["foo", "foo", "foo", "foo", "bar", "bar", "baz"], "B": [1, 2, np.nan, 3, 3, np.nan, 4], }, index=pd.date_range("2020-01-01", "2020-01-07"), ) if transformation_func == "cumcount": test_op = lambda x: x.transform("cumcount") mock_op = lambda x: Series(range(len(x)), x.index) elif transformation_func == "fillna": test_op = lambda x: x.transform("fillna", value=0) mock_op = lambda x: x.fillna(value=0) elif transformation_func == "tshift": msg = ( "Current behavior of groupby.tshift is inconsistent with other " "transformations. 
See GH34452 for more details" ) request.node.add_marker(pytest.mark.xfail(reason=msg)) else: test_op = lambda x: x.transform(transformation_func) mock_op = lambda x: getattr(x, transformation_func)() result = test_op(df.groupby("A")) groups = [df[["B"]].iloc[:4], df[["B"]].iloc[4:6], df[["B"]].iloc[6:]] expected = concat([mock_op(g) for g in groups]) if transformation_func == "cumcount": tm.assert_series_equal(result, expected) else: tm.assert_frame_equal(result, expected) def test_transform_select_columns(df): f = lambda x: x.mean() result = df.groupby("A")[["C", "D"]].transform(f) selection = df[["C", "D"]] expected = selection.groupby(df["A"]).transform(f) tm.assert_frame_equal(result, expected) def test_transform_exclude_nuisance(df): # this also tests orderings in transform between # series/frame to make sure it's consistent expected = {} grouped = df.groupby("A") expected["C"] = grouped["C"].transform(np.mean) expected["D"] = grouped["D"].transform(np.mean) expected = DataFrame(expected) result = df.groupby("A").transform(np.mean) tm.assert_frame_equal(result, expected) def test_transform_function_aliases(df): result = df.groupby("A").transform("mean") expected = df.groupby("A").transform(np.mean) tm.assert_frame_equal(result, expected) result = df.groupby("A")["C"].transform("mean") expected = df.groupby("A")["C"].transform(np.mean) tm.assert_series_equal(result, expected) def test_series_fast_transform_date(): # GH 13191 df = DataFrame( {"grouping": [np.nan, 1, 1, 3], "d": pd.date_range("2014-1-1", "2014-1-4")} ) result = df.groupby("grouping")["d"].transform("first") dates = [ pd.NaT, Timestamp("2014-1-2"), Timestamp("2014-1-2"), Timestamp("2014-1-4"), ] expected = Series(dates, name="d") tm.assert_series_equal(result, expected) def test_transform_length(): # GH 9697 df = DataFrame({"col1": [1, 1, 2, 2], "col2": [1, 2, 3, np.nan]}) expected = Series([3.0] * 4) def nsum(x): return np.nansum(x) results = [ df.groupby("col1").transform(sum)["col2"], df.groupby("col1")["col2"].transform(sum), df.groupby("col1").transform(nsum)["col2"], df.groupby("col1")["col2"].transform(nsum), ] for result in results: tm.assert_series_equal(result, expected, check_names=False) def test_transform_coercion(): # 14457 # when we are transforming be sure to not coerce # via assignment df = DataFrame({"A": ["a", "a"], "B": [0, 1]}) g = df.groupby("A") expected = g.transform(np.mean) result = g.transform(lambda x: np.mean(x)) tm.assert_frame_equal(result, expected) def test_groupby_transform_with_int(): # GH 3740, make sure that we might upcast on item-by-item transform # floats df = DataFrame( { "A": [1, 1, 1, 2, 2, 2], "B": Series(1, dtype="float64"), "C": Series([1, 2, 3, 1, 2, 3], dtype="float64"), "D": "foo", } ) with np.errstate(all="ignore"): result = df.groupby("A").transform(lambda x: (x - x.mean()) / x.std()) expected = DataFrame( {"B": np.nan, "C": Series([-1, 0, 1, -1, 0, 1], dtype="float64")} ) tm.assert_frame_equal(result, expected) # int case df = DataFrame( { "A": [1, 1, 1, 2, 2, 2], "B": 1, "C": [1, 2, 3, 1, 2, 3], "D": "foo", } ) with np.errstate(all="ignore"): result = df.groupby("A").transform(lambda x: (x - x.mean()) / x.std()) expected = DataFrame({"B": np.nan, "C": [-1, 0, 1, -1, 0, 1]}) tm.assert_frame_equal(result, expected) # int that needs float conversion s = Series([2, 3, 4, 10, 5, -1]) df = DataFrame({"A": [1, 1, 1, 2, 2, 2], "B": 1, "C": s, "D": "foo"}) with np.errstate(all="ignore"): result = df.groupby("A").transform(lambda x: (x - x.mean()) / x.std()) s1 = s.iloc[0:3] s1 = 
(s1 - s1.mean()) / s1.std() s2 = s.iloc[3:6] s2 = (s2 - s2.mean()) / s2.std() expected = DataFrame({"B": np.nan, "C": concat([s1, s2])}) tm.assert_frame_equal(result, expected) # int downcasting result = df.groupby("A").transform(lambda x: x * 2 / 2) expected = DataFrame({"B": 1, "C": [2, 3, 4, 10, 5, -1]}) tm.assert_frame_equal(result, expected) def test_groupby_transform_with_nan_group(): # GH 9941 df = DataFrame({"a": range(10), "b": [1, 1, 2, 3, np.nan, 4, 4, 5, 5, 5]}) result = df.groupby(df.b)["a"].transform(max) expected = Series([1.0, 1.0, 2.0, 3.0, np.nan, 6.0, 6.0, 9.0, 9.0, 9.0], name="a") tm.assert_series_equal(result, expected) def test_transform_mixed_type(): index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]]) df = DataFrame( { "d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0], "c": np.tile(["a", "b", "c"], 2), "v": np.arange(1.0, 7.0), }, index=index, ) def f(group): group["g"] = group["d"] * 2 return group[:1] grouped = df.groupby("c") result = grouped.apply(f) assert result["d"].dtype == np.float64 # this is by definition a mutating operation! with pd.option_context("mode.chained_assignment", None): for key, group in grouped: res = f(group) tm.assert_frame_equal(res, result.loc[key]) @pytest.mark.parametrize( "op, args, targop", [ ("cumprod", (), lambda x: x.cumprod()), ("cumsum", (), lambda x: x.cumsum()), ("shift", (-1,), lambda x: x.shift(-1)), ("shift", (1,), lambda x: x.shift()), ], ) def test_cython_transform_series(op, args, targop): # GH 4095 s = Series(np.random.randn(1000)) s_missing = s.copy() s_missing.iloc[2:10] = np.nan labels = np.random.randint(0, 50, size=1000).astype(float) # series for data in [s, s_missing]: # print(data.head()) expected = data.groupby(labels).transform(targop) tm.assert_series_equal(expected, data.groupby(labels).transform(op, *args)) tm.assert_series_equal(expected, getattr(data.groupby(labels), op)(*args)) @pytest.mark.parametrize("op", ["cumprod", "cumsum"]) @pytest.mark.parametrize("skipna", [False, True]) @pytest.mark.parametrize( "input, exp", [ # When everything is NaN ({"key": ["b"] * 10, "value": np.nan}, Series([np.nan] * 10, name="value")), # When there is a single NaN ( {"key": ["b"] * 10 + ["a"] * 2, "value": [3] * 3 + [np.nan] + [3] * 8}, { ("cumprod", False): [3.0, 9.0, 27.0] + [np.nan] * 7 + [3.0, 9.0], ("cumprod", True): [ 3.0, 9.0, 27.0, np.nan, 81.0, 243.0, 729.0, 2187.0, 6561.0, 19683.0, 3.0, 9.0, ], ("cumsum", False): [3.0, 6.0, 9.0] + [np.nan] * 7 + [3.0, 6.0], ("cumsum", True): [ 3.0, 6.0, 9.0, np.nan, 12.0, 15.0, 18.0, 21.0, 24.0, 27.0, 3.0, 6.0, ], }, ), ], ) def test_groupby_cum_skipna(op, skipna, input, exp): df = DataFrame(input) result = df.groupby("key")["value"].transform(op, skipna=skipna) if isinstance(exp, dict): expected = exp[(op, skipna)] else: expected = exp expected = Series(expected, name="value") tm.assert_series_equal(expected, result) @pytest.mark.arm_slow @pytest.mark.parametrize( "op, args, targop", [ ("cumprod", (), lambda x: x.cumprod()), ("cumsum", (), lambda x: x.cumsum()), ("shift", (-1,), lambda x: x.shift(-1)), ("shift", (1,), lambda x: x.shift()), ], ) def test_cython_transform_frame(op, args, targop): s = Series(np.random.randn(1000)) s_missing = s.copy() s_missing.iloc[2:10] = np.nan labels = np.random.randint(0, 50, size=1000).astype(float) strings = list("qwertyuiopasdfghjklz") strings_missing = strings[:] strings_missing[5] = np.nan df = DataFrame( { "float": s, "float_missing": s_missing, "int": [1, 1, 1, 1, 2] * 200, "datetime": pd.date_range("1990-1-1", periods=1000), 
"timedelta": pd.timedelta_range(1, freq="s", periods=1000), "string": strings * 50, "string_missing": strings_missing * 50, }, columns=[ "float", "float_missing", "int", "datetime", "timedelta", "string", "string_missing", ], ) df["cat"] = df["string"].astype("category") df2 = df.copy() df2.index = pd.MultiIndex.from_product([range(100), range(10)]) # DataFrame - Single and MultiIndex, # group by values, index level, columns for df in [df, df2]: for gb_target in [ {"by": labels}, {"level": 0}, {"by": "string"}, ]: # {"by": 'string_missing'}]: # {"by": ['int','string']}]: gb = df.groupby(**gb_target) # allowlisted methods set the selection before applying # bit a of hack to make sure the cythonized shift # is equivalent to pre 0.17.1 behavior if op == "shift": gb._set_group_selection() if op != "shift" and "int" not in gb_target: # numeric apply fastpath promotes dtype so have # to apply separately and concat i = gb[["int"]].apply(targop) f = gb[["float", "float_missing"]].apply(targop) expected = pd.concat([f, i], axis=1) else: expected = gb.apply(targop) expected = expected.sort_index(axis=1) tm.assert_frame_equal(expected, gb.transform(op, *args).sort_index(axis=1)) tm.assert_frame_equal(expected, getattr(gb, op)(*args).sort_index(axis=1)) # individual columns for c in df: if c not in ["float", "int", "float_missing"] and op != "shift": msg = "No numeric types to aggregate" with pytest.raises(DataError, match=msg): gb[c].transform(op) with pytest.raises(DataError, match=msg): getattr(gb[c], op)() else: expected = gb[c].apply(targop) expected.name = c tm.assert_series_equal(expected, gb[c].transform(op, *args)) tm.assert_series_equal(expected, getattr(gb[c], op)(*args)) def test_transform_with_non_scalar_group(): # GH 10165 cols = pd.MultiIndex.from_tuples( [ ("syn", "A"), ("mis", "A"), ("non", "A"), ("syn", "C"), ("mis", "C"), ("non", "C"), ("syn", "T"), ("mis", "T"), ("non", "T"), ("syn", "G"), ("mis", "G"), ("non", "G"), ] ) df = DataFrame( np.random.randint(1, 10, (4, 12)), columns=cols, index=["A", "C", "G", "T"] ) msg = "transform must return a scalar value for each group.*" with pytest.raises(ValueError, match=msg): df.groupby(axis=1, level=1).transform(lambda z: z.div(z.sum(axis=1), axis=0)) @pytest.mark.parametrize( "cols,exp,comp_func", [ ("a", Series([1, 1, 1], name="a"), tm.assert_series_equal), ( ["a", "c"], DataFrame({"a": [1, 1, 1], "c": [1, 1, 1]}), tm.assert_frame_equal, ), ], ) @pytest.mark.parametrize("agg_func", ["count", "rank", "size"]) def test_transform_numeric_ret(cols, exp, comp_func, agg_func, request): if agg_func == "size" and isinstance(cols, list): # https://github.com/pytest-dev/pytest/issues/6300 # workaround to xfail fixture/param permutations reason = "'size' transformation not supported with NDFrameGroupy" request.node.add_marker(pytest.mark.xfail(reason=reason)) # GH 19200 df = DataFrame( {"a": pd.date_range("2018-01-01", periods=3), "b": range(3), "c": range(7, 10)} ) result = df.groupby("b")[cols].transform(agg_func) if agg_func == "rank": exp = exp.astype("float") comp_func(result, exp) def test_transform_ffill(): # GH 24211 data = [["a", 0.0], ["a", float("nan")], ["b", 1.0], ["b", float("nan")]] df = DataFrame(data, columns=["key", "values"]) result = df.groupby("key").transform("ffill") expected = DataFrame({"values": [0.0, 0.0, 1.0, 1.0]}) tm.assert_frame_equal(result, expected) result = df.groupby("key")["values"].transform("ffill") expected = Series([0.0, 0.0, 1.0, 1.0], name="values") tm.assert_series_equal(result, expected) 
@pytest.mark.parametrize("mix_groupings", [True, False]) @pytest.mark.parametrize("as_series", [True, False]) @pytest.mark.parametrize("val1,val2", [("foo", "bar"), (1, 2), (1.0, 2.0)]) @pytest.mark.parametrize( "fill_method,limit,exp_vals", [ ( "ffill", None, [np.nan, np.nan, "val1", "val1", "val1", "val2", "val2", "val2"], ), ("ffill", 1, [np.nan, np.nan, "val1", "val1", np.nan, "val2", "val2", np.nan]), ( "bfill", None, ["val1", "val1", "val1", "val2", "val2", "val2", np.nan, np.nan], ), ("bfill", 1, [np.nan, "val1", "val1", np.nan, "val2", "val2", np.nan, np.nan]), ], ) def test_group_fill_methods( mix_groupings, as_series, val1, val2, fill_method, limit, exp_vals ): vals = [np.nan, np.nan, val1, np.nan, np.nan, val2, np.nan, np.nan] _exp_vals = list(exp_vals) # Overwrite placeholder values for index, exp_val in enumerate(_exp_vals): if exp_val == "val1": _exp_vals[index] = val1 elif exp_val == "val2": _exp_vals[index] = val2 # Need to modify values and expectations depending on the # Series / DataFrame that we ultimately want to generate if mix_groupings: # ['a', 'b', 'a, 'b', ...] keys = ["a", "b"] * len(vals) def interweave(list_obj): temp = [] for x in list_obj: temp.extend([x, x]) return temp _exp_vals = interweave(_exp_vals) vals = interweave(vals) else: # ['a', 'a', 'a', ... 'b', 'b', 'b'] keys = ["a"] * len(vals) + ["b"] * len(vals) _exp_vals = _exp_vals * 2 vals = vals * 2 df = DataFrame({"key": keys, "val": vals}) if as_series: result = getattr(df.groupby("key")["val"], fill_method)(limit=limit) exp = Series(_exp_vals, name="val") tm.assert_series_equal(result, exp) else: result = getattr(df.groupby("key"), fill_method)(limit=limit) exp = DataFrame({"val": _exp_vals}) tm.assert_frame_equal(result, exp) @pytest.mark.parametrize("fill_method", ["ffill", "bfill"]) def test_pad_stable_sorting(fill_method): # GH 21207 x = [0] * 20 y = [np.nan] * 10 + [1] * 10 if fill_method == "bfill": y = y[::-1] df = DataFrame({"x": x, "y": y}) expected = df.drop("x", 1) result = getattr(df.groupby("x"), fill_method)() tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("test_series", [True, False]) @pytest.mark.parametrize( "freq", [ None, pytest.param( "D", marks=pytest.mark.xfail( reason="GH#23918 before method uses freq in vectorized approach" ), ), ], ) @pytest.mark.parametrize("periods", [1, -1]) @pytest.mark.parametrize("fill_method", ["ffill", "bfill", None]) @pytest.mark.parametrize("limit", [None, 1]) def test_pct_change(test_series, freq, periods, fill_method, limit): # GH 21200, 21621, 30463 vals = [3, np.nan, np.nan, np.nan, 1, 2, 4, 10, np.nan, 4] keys = ["a", "b"] key_v = np.repeat(keys, len(vals)) df = DataFrame({"key": key_v, "vals": vals * 2}) df_g = df if fill_method is not None: df_g = getattr(df.groupby("key"), fill_method)(limit=limit) grp = df_g.groupby(df.key) expected = grp["vals"].obj / grp["vals"].shift(periods) - 1 if test_series: result = df.groupby("key")["vals"].pct_change( periods=periods, fill_method=fill_method, limit=limit, freq=freq ) tm.assert_series_equal(result, expected) else: result = df.groupby("key").pct_change( periods=periods, fill_method=fill_method, limit=limit, freq=freq ) tm.assert_frame_equal(result, expected.to_frame("vals")) @pytest.mark.parametrize( "func, expected_status", [ ("ffill", ["shrt", "shrt", "lng", np.nan, "shrt", "ntrl", "ntrl"]), ("bfill", ["shrt", "lng", "lng", "shrt", "shrt", "ntrl", np.nan]), ], ) def test_ffill_bfill_non_unique_multilevel(func, expected_status): # GH 19437 date = pd.to_datetime( [ "2018-01-01", 
"2018-01-01", "2018-01-01", "2018-01-01", "2018-01-02", "2018-01-01", "2018-01-02", ] ) symbol = ["MSFT", "MSFT", "MSFT", "AAPL", "AAPL", "TSLA", "TSLA"] status = ["shrt", np.nan, "lng", np.nan, "shrt", "ntrl", np.nan] df = DataFrame({"date": date, "symbol": symbol, "status": status}) df = df.set_index(["date", "symbol"]) result = getattr(df.groupby("symbol")["status"], func)() index = MultiIndex.from_tuples( tuples=list(zip(*[date, symbol])), names=["date", "symbol"] ) expected = Series(expected_status, index=index, name="status") tm.assert_series_equal(result, expected) @pytest.mark.parametrize("func", [np.any, np.all]) def test_any_all_np_func(func): # GH 20653 df = DataFrame( [["foo", True], [np.nan, True], ["foo", True]], columns=["key", "val"] ) exp = Series([True, np.nan, True], name="val") res = df.groupby("key")["val"].transform(func) tm.assert_series_equal(res, exp) def test_groupby_transform_rename(): # https://github.com/pandas-dev/pandas/issues/23461 def demean_rename(x): result = x - x.mean() if isinstance(x, pd.Series): return result result = result.rename(columns={c: "{c}_demeaned" for c in result.columns}) return result df = DataFrame({"group": list("ababa"), "value": [1, 1, 1, 2, 2]}) expected = DataFrame({"value": [-1.0 / 3, -0.5, -1.0 / 3, 0.5, 2.0 / 3]}) result = df.groupby("group").transform(demean_rename) tm.assert_frame_equal(result, expected) result_single = df.groupby("group").value.transform(demean_rename) tm.assert_series_equal(result_single, expected["value"]) @pytest.mark.parametrize("func", [min, max, np.min, np.max, "first", "last"]) def test_groupby_transform_timezone_column(func): # GH 24198 ts = pd.to_datetime("now", utc=True).tz_convert("Asia/Singapore") result = DataFrame({"end_time": [ts], "id": [1]}) result["max_end_time"] = result.groupby("id").end_time.transform(func) expected = DataFrame([[ts, 1, ts]], columns=["end_time", "id", "max_end_time"]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "func, values", [ ("idxmin", ["1/1/2011"] * 2 + ["1/3/2011"] * 7 + ["1/10/2011"]), ("idxmax", ["1/2/2011"] * 2 + ["1/9/2011"] * 7 + ["1/10/2011"]), ], ) def test_groupby_transform_with_datetimes(func, values): # GH 15306 dates = pd.date_range("1/1/2011", periods=10, freq="D") stocks = DataFrame({"price": np.arange(10.0)}, index=dates) stocks["week_id"] = dates.isocalendar().week result = stocks.groupby(stocks["week_id"])["price"].transform(func) expected = Series(data=pd.to_datetime(values), index=dates, name="price") tm.assert_series_equal(result, expected) @pytest.mark.parametrize("func", ["cumsum", "cumprod", "cummin", "cummax"]) def test_transform_absent_categories(func): # GH 16771 # cython transforms with more groups than rows x_vals = [1] x_cats = range(2) y = [1] df = DataFrame({"x": Categorical(x_vals, x_cats), "y": y}) result = getattr(df.y.groupby(df.x), func)() expected = df.y tm.assert_series_equal(result, expected) @pytest.mark.parametrize("func", ["ffill", "bfill", "shift"]) @pytest.mark.parametrize("key, val", [("level", 0), ("by", Series([0]))]) def test_ffill_not_in_axis(func, key, val): # GH 21521 df = DataFrame([[np.nan]]) result = getattr(df.groupby(**{key: val}), func)() expected = df tm.assert_frame_equal(result, expected) def test_transform_invalid_name_raises(): # GH#27486 df = DataFrame({"a": [0, 1, 1, 2]}) g = df.groupby(["a", "b", "b", "c"]) with pytest.raises(ValueError, match="not a valid function name"): g.transform("some_arbitrary_name") # method exists on the object, but is not a valid transformation/agg 
assert hasattr(g, "aggregate") # make sure the method exists with pytest.raises(ValueError, match="not a valid function name"): g.transform("aggregate") # Test SeriesGroupBy g = df["a"].groupby(["a", "b", "b", "c"]) with pytest.raises(ValueError, match="not a valid function name"): g.transform("some_arbitrary_name") @pytest.mark.parametrize( "obj", [ DataFrame( {"a": [0, 0, 0, 1, 1, 1], "b": range(6)}, index=["A", "B", "C", "D", "E", "F"], ), Series([0, 0, 0, 1, 1, 1], index=["A", "B", "C", "D", "E", "F"]), ], ) def test_transform_agg_by_name(request, reduction_func, obj): func = reduction_func g = obj.groupby(np.repeat([0, 1], 3)) if func == "ngroup": # GH#27468 request.node.add_marker( pytest.mark.xfail(reason="TODO: g.transform('ngroup') doesn't work") ) if func == "size" and obj.ndim == 2: # GH#27469 request.node.add_marker( pytest.mark.xfail(reason="TODO: g.transform('size') doesn't work") ) if func == "corrwith" and isinstance(obj, Series): # GH#32293 request.node.add_marker( pytest.mark.xfail(reason="TODO: implement SeriesGroupBy.corrwith") ) args = {"nth": [0], "quantile": [0.5], "corrwith": [obj]}.get(func, []) result = g.transform(func, *args) # this is the *definition* of a transformation tm.assert_index_equal(result.index, obj.index) if hasattr(obj, "columns"): tm.assert_index_equal(result.columns, obj.columns) # verify that values were broadcasted across each group assert len(set(DataFrame(result).iloc[-3:, -1])) == 1 def test_transform_lambda_with_datetimetz(): # GH 27496 df = DataFrame( { "time": [ Timestamp("2010-07-15 03:14:45"), Timestamp("2010-11-19 18:47:06"), ], "timezone": ["Etc/GMT+4", "US/Eastern"], } ) result = df.groupby(["timezone"])["time"].transform( lambda x: x.dt.tz_localize(x.name) ) expected = Series( [ Timestamp("2010-07-15 03:14:45", tz="Etc/GMT+4"), Timestamp("2010-11-19 18:47:06", tz="US/Eastern"), ], name="time", ) tm.assert_series_equal(result, expected) def test_transform_fastpath_raises(): # GH#29631 case where fastpath defined in groupby.generic _choose_path # raises, but slow_path does not df = DataFrame({"A": [1, 1, 2, 2], "B": [1, -1, 1, 2]}) gb = df.groupby("A") def func(grp): # we want a function such that func(frame) fails but func.apply(frame) # works if grp.ndim == 2: # Ensure that fast_path fails raise NotImplementedError("Don't cross the streams") return grp * 2 # Check that the fastpath raises, see _transform_general obj = gb._obj_with_exclusions gen = gb.grouper.get_iterator(obj, axis=gb.axis) fast_path, slow_path = gb._define_paths(func) _, group = next(gen) with pytest.raises(NotImplementedError, match="Don't cross the streams"): fast_path(group) result = gb.transform(func) expected = DataFrame([2, -2, 2, 4], columns=["B"]) tm.assert_frame_equal(result, expected) def test_transform_lambda_indexing(): # GH 7883 df = DataFrame( { "A": ["foo", "bar", "foo", "bar", "foo", "flux", "foo", "flux"], "B": ["one", "one", "two", "three", "two", "six", "five", "three"], "C": range(8), "D": range(8), "E": range(8), } ) df = df.set_index(["A", "B"]) df = df.sort_index() result = df.groupby(level="A").transform(lambda x: x.iloc[-1]) expected = DataFrame( { "C": [3, 3, 7, 7, 4, 4, 4, 4], "D": [3, 3, 7, 7, 4, 4, 4, 4], "E": [3, 3, 7, 7, 4, 4, 4, 4], }, index=MultiIndex.from_tuples( [ ("bar", "one"), ("bar", "three"), ("flux", "six"), ("flux", "three"), ("foo", "five"), ("foo", "one"), ("foo", "two"), ("foo", "two"), ], names=["A", "B"], ), ) tm.assert_frame_equal(result, expected) def test_categorical_and_not_categorical_key(observed): # Checks that 
groupby-transform, when grouping by both a categorical # and a non-categorical key, doesn't try to expand the output to include # non-observed categories but instead matches the input shape. # GH 32494 df_with_categorical = DataFrame( { "A": Categorical(["a", "b", "a"], categories=["a", "b", "c"]), "B": [1, 2, 3], "C": ["a", "b", "a"], } ) df_without_categorical = DataFrame( {"A": ["a", "b", "a"], "B": [1, 2, 3], "C": ["a", "b", "a"]} ) # DataFrame case result = df_with_categorical.groupby(["A", "C"], observed=observed).transform("sum") expected = df_without_categorical.groupby(["A", "C"]).transform("sum") tm.assert_frame_equal(result, expected) expected_explicit = DataFrame({"B": [4, 2, 4]}) tm.assert_frame_equal(result, expected_explicit) # Series case result = df_with_categorical.groupby(["A", "C"], observed=observed)["B"].transform( "sum" ) expected = df_without_categorical.groupby(["A", "C"])["B"].transform("sum") tm.assert_series_equal(result, expected) expected_explicit = Series([4, 2, 4], name="B") tm.assert_series_equal(result, expected_explicit)
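
# --- Illustrative addition, not from the upstream pandas test suite ---
# A minimal sketch of the broadcasting contract that most tests above rely on:
# transform returns one value per input row, aligned on the original index.
# The frame and the group labels below are invented for illustration only.
def test_transform_broadcasts_group_mean_sketch():
    df = DataFrame({"key": ["a", "a", "b"], "val": [1.0, 3.0, 5.0]})
    result = df.groupby("key")["val"].transform("mean")
    expected = Series([2.0, 2.0, 5.0], name="val")
    tm.assert_series_equal(result, expected)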
the-stack_106_30827
#!/usr/bin/env python """REST Translation server.""" from __future__ import print_function import codecs import sys import os import time import json import threading import re import traceback import importlib import torch import onmt.opts from itertools import islice from copy import deepcopy from onmt.utils.logging import init_logger from onmt.utils.misc import set_random_seed from onmt.utils.misc import check_model_config from onmt.utils.alignment import to_word_align from onmt.utils.parse import ArgumentParser from onmt.translate.translator import build_translator def critical(func): """Decorator for critical section (mutually exclusive code)""" def wrapper(server_model, *args, **kwargs): if sys.version_info[0] == 3: if not server_model.running_lock.acquire(True, 120): raise ServerModelError("Model %d running lock timeout" % server_model.model_id) else: # semaphore doesn't have a timeout arg in Python 2.7 server_model.running_lock.acquire(True) try: o = func(server_model, *args, **kwargs) except (Exception, RuntimeError): server_model.running_lock.release() raise server_model.running_lock.release() return o return wrapper class Timer: def __init__(self, start=False): self.stime = -1 self.prev = -1 self.times = {} if start: self.start() def start(self): self.stime = time.time() self.prev = self.stime self.times = {} def tick(self, name=None, tot=False): t = time.time() if not tot: elapsed = t - self.prev else: elapsed = t - self.stime self.prev = t if name is not None: self.times[name] = elapsed return elapsed class ServerModelError(Exception): pass class CTranslate2Translator(object): """ This class wraps the ctranslate2.Translator object to reproduce the onmt.translate.translator API. """ def __init__(self, model_path, device, device_index, batch_size, beam_size, n_best, preload=False): import ctranslate2 self.translator = ctranslate2.Translator( model_path, device=device, device_index=device_index, inter_threads=1, intra_threads=1, compute_type="default") self.batch_size = batch_size self.beam_size = beam_size self.n_best = n_best if preload: # perform a first request to initialize everything dummy_translation = self.translate(["a"]) print("Performed a dummy translation to initialize the model", dummy_translation) time.sleep(1) self.translator.unload_model(to_cpu=True) def translate(self, texts_to_translate, batch_size=8): batch = [item.split(" ") for item in texts_to_translate] preds = self.translator.translate_batch( batch, max_batch_size=self.batch_size, beam_size=self.beam_size, num_hypotheses=self.n_best ) scores = [[item["score"] for item in ex] for ex in preds] predictions = [[" ".join(item["tokens"]) for item in ex] for ex in preds] return scores, predictions def to_cpu(self): self.translator.unload_model(to_cpu=True) def to_gpu(self): self.translator.load_model() class TranslationServer(object): def __init__(self): self.models = {} self.next_id = 0 def start(self, config_file): """Read the config file and pre-/load the models.""" self.config_file = config_file with open(self.config_file) as f: self.confs = json.load(f) self.models_root = self.confs.get('models_root', './available_models') for i, conf in enumerate(self.confs["models"]): if "models" not in conf: if "model" in conf: # backwards compatibility for confs conf["models"] = [conf["model"]] else: raise ValueError("""Incorrect config file: missing 'models' parameter for model #%d""" % i) check_model_config(conf, self.models_root) kwargs = {'timeout': conf.get('timeout', None), 'load': conf.get('load', None), 
'preprocess_opt': conf.get('preprocess', None), 'tokenizer_opt': conf.get('tokenizer', None), 'postprocess_opt': conf.get('postprocess', None), 'on_timeout': conf.get('on_timeout', None), 'model_root': conf.get('model_root', self.models_root), 'ct2_model': conf.get('ct2_model', None) } kwargs = {k: v for (k, v) in kwargs.items() if v is not None} model_id = conf.get("id", None) opt = conf["opt"] opt["models"] = conf["models"] self.preload_model(opt, model_id=model_id, **kwargs) def clone_model(self, model_id, opt, timeout=-1): """Clone a model `model_id`. Different options may be passed. If `opt` is None, it will use the same set of options """ if model_id in self.models: if opt is None: opt = self.models[model_id].user_opt opt["models"] = self.models[model_id].opt.models return self.load_model(opt, timeout) else: raise ServerModelError("No such model '%s'" % str(model_id)) def load_model(self, opt, model_id=None, **model_kwargs): """Load a model given a set of options """ model_id = self.preload_model(opt, model_id=model_id, **model_kwargs) load_time = self.models[model_id].load_time return model_id, load_time def preload_model(self, opt, model_id=None, **model_kwargs): """Preloading the model: updating internal datastructure It will effectively load the model if `load` is set """ if model_id is not None: if model_id in self.models.keys(): raise ValueError("Model ID %d already exists" % model_id) else: model_id = self.next_id while model_id in self.models.keys(): model_id += 1 self.next_id = model_id + 1 print("Pre-loading model %d" % model_id) model = ServerModel(opt, model_id, **model_kwargs) self.models[model_id] = model return model_id def run(self, inputs): """Translate `inputs` We keep the same format as the Lua version i.e. ``[{"id": model_id, "src": "sequence to translate"},{ ...}]`` We use inputs[0]["id"] as the model id """ model_id = inputs[0].get("id", 0) if model_id in self.models and self.models[model_id] is not None: return self.models[model_id].run(inputs) else: print("Error No such model '%s'" % str(model_id)) raise ServerModelError("No such model '%s'" % str(model_id)) def unload_model(self, model_id): """Manually unload a model. It will free the memory and cancel the timer """ if model_id in self.models and self.models[model_id] is not None: self.models[model_id].unload() else: raise ServerModelError("No such model '%s'" % str(model_id)) def list_models(self): """Return the list of available models """ models = [] for _, model in self.models.items(): models += [model.to_dict()] return models class ServerModel(object): """Wrap a model with server functionality. Args: opt (dict): Options for the Translator model_id (int): Model ID preprocess_opt (list): Options for preprocess processus or None (extend for CJK) tokenizer_opt (dict): Options for the tokenizer or None postprocess_opt (list): Options for postprocess processus or None (extend for CJK) load (bool): whether to load the model during :func:`__init__()` timeout (int): Seconds before running :func:`do_timeout()` Negative values means no timeout on_timeout (str): Options are ["to_cpu", "unload"]. Set what to do on timeout (see :func:`do_timeout()`.) 
model_root (str): Path to the model directory it must contain the model and tokenizer file """ def __init__(self, opt, model_id, preprocess_opt=None, tokenizer_opt=None, postprocess_opt=None, load=False, timeout=-1, on_timeout="to_cpu", model_root="./", ct2_model=None): self.model_root = model_root self.opt = self.parse_opt(opt) self.model_id = model_id self.preprocess_opt = preprocess_opt self.tokenizer_opt = tokenizer_opt self.postprocess_opt = postprocess_opt self.timeout = timeout self.on_timeout = on_timeout self.ct2_model = os.path.join(model_root, ct2_model) \ if ct2_model is not None else None self.unload_timer = None self.user_opt = opt self.tokenizer = None if len(self.opt.log_file) > 0: log_file = os.path.join(model_root, self.opt.log_file) else: log_file = None self.logger = init_logger(log_file=log_file, log_file_level=self.opt.log_file_level, rotate=True) self.loading_lock = threading.Event() self.loading_lock.set() self.running_lock = threading.Semaphore(value=1) set_random_seed(self.opt.seed, self.opt.cuda) if self.preprocess_opt is not None: self.logger.info("Loading preprocessor") self.preprocessor = [] for function_path in self.preprocess_opt: function = get_function_by_path(function_path) self.preprocessor.append(function) if self.tokenizer_opt is not None: self.logger.info("Loading tokenizer") if "type" not in self.tokenizer_opt: raise ValueError( "Missing mandatory tokenizer option 'type'") if self.tokenizer_opt['type'] == 'sentencepiece': if "model" not in self.tokenizer_opt: raise ValueError( "Missing mandatory tokenizer option 'model'") import sentencepiece as spm sp = spm.SentencePieceProcessor() model_path = os.path.join(self.model_root, self.tokenizer_opt['model']) sp.Load(model_path) self.tokenizer = sp elif self.tokenizer_opt['type'] == 'pyonmttok': if "params" not in self.tokenizer_opt: raise ValueError( "Missing mandatory tokenizer option 'params'") import pyonmttok if self.tokenizer_opt["mode"] is not None: mode = self.tokenizer_opt["mode"] else: mode = None # load can be called multiple times: modify copy tokenizer_params = dict(self.tokenizer_opt["params"]) for key, value in self.tokenizer_opt["params"].items(): if key.endswith("path"): tokenizer_params[key] = os.path.join( self.model_root, value) tokenizer = pyonmttok.Tokenizer(mode, **tokenizer_params) self.tokenizer = tokenizer else: raise ValueError("Invalid value for tokenizer type") if self.postprocess_opt is not None: self.logger.info("Loading postprocessor") self.postprocessor = [] for function_path in self.postprocess_opt: function = get_function_by_path(function_path) self.postprocessor.append(function) if load: self.load(preload=True) self.stop_unload_timer() def parse_opt(self, opt): """Parse the option set passed by the user using `onmt.opts` Args: opt (dict): Options passed by the user Returns: opt (argparse.Namespace): full set of options for the Translator """ prec_argv = sys.argv sys.argv = sys.argv[:1] parser = ArgumentParser() onmt.opts.translate_opts(parser) models = opt['models'] if not isinstance(models, (list, tuple)): models = [models] opt['models'] = [os.path.join(self.model_root, model) for model in models] opt['src'] = "dummy_src" for (k, v) in opt.items(): if k == 'models': sys.argv += ['-model'] sys.argv += [str(model) for model in v] elif type(v) == bool: sys.argv += ['-%s' % k] else: sys.argv += ['-%s' % k, str(v)] opt = parser.parse_args() ArgumentParser.validate_translate_opts(opt) opt.cuda = opt.gpu > -1 sys.argv = prec_argv return opt @property def loaded(self): return 
hasattr(self, 'translator') def load(self, preload=False): self.loading_lock.clear() timer = Timer() self.logger.info("Loading model %d" % self.model_id) timer.start() try: if self.ct2_model is not None: self.translator = CTranslate2Translator( self.ct2_model, device="cuda", device_index=self.opt.gpu, batch_size=self.opt.batch_size, beam_size=self.opt.beam_size, n_best=self.opt.n_best, preload=preload) else: self.translator = build_translator( self.opt, report_score=False, out_file=codecs.open(os.devnull, "w", "utf-8")) except RuntimeError as e: raise ServerModelError("Runtime Error: %s" % str(e)) timer.tick("model_loading") self.load_time = timer.tick() self.reset_unload_timer() self.loading_lock.set() @critical def run(self, inputs): """Translate `inputs` using this model Args: inputs (List[dict[str, str]]): [{"src": "..."},{"src": ...}] Returns: result (list): translations times (dict): containing times """ self.stop_unload_timer() timer = Timer() timer.start() self.logger.info("Running translation using %d" % self.model_id) if not self.loading_lock.is_set(): self.logger.info( "Model #%d is being loaded by another thread, waiting" % self.model_id) if not self.loading_lock.wait(timeout=30): raise ServerModelError("Model %d loading timeout" % self.model_id) else: if not self.loaded: self.load() timer.tick(name="load") elif self.opt.cuda: self.to_gpu() timer.tick(name="to_gpu") texts = [] head_spaces = [] tail_spaces = [] sslength = [] all_preprocessed = [] for i, inp in enumerate(inputs): src = inp['src'] whitespaces_before, whitespaces_after = "", "" match_before = re.search(r'^\s+', src) match_after = re.search(r'\s+$', src) if match_before is not None: whitespaces_before = match_before.group(0) if match_after is not None: whitespaces_after = match_after.group(0) head_spaces.append(whitespaces_before) # every segment becomes a dict for flexibility purposes seg_dict = self.maybe_preprocess(src.strip()) all_preprocessed.append(seg_dict) for seg in seg_dict["seg"]: tok = self.maybe_tokenize(seg) texts.append(tok) sslength.append(len(tok.split())) tail_spaces.append(whitespaces_after) empty_indices = [i for i, x in enumerate(texts) if x == ""] texts_to_translate = [x for x in texts if x != ""] scores = [] predictions = [] if len(texts_to_translate) > 0: try: scores, predictions = self.translator.translate( texts_to_translate, batch_size=len(texts_to_translate) if self.opt.batch_size == 0 else self.opt.batch_size) except (RuntimeError, Exception) as e: err = "Error: %s" % str(e) self.logger.error(err) self.logger.error("repr(text_to_translate): " + repr(texts_to_translate)) self.logger.error("model: #%s" % self.model_id) self.logger.error("model opt: " + str(self.opt.__dict__)) self.logger.error(traceback.format_exc()) raise ServerModelError(err) timer.tick(name="translation") self.logger.info("""Using model #%d\t%d inputs \ttranslation time: %f""" % (self.model_id, len(texts), timer.times['translation'])) self.reset_unload_timer() # NOTE: translator returns lists of `n_best` list def flatten_list(_list): return sum(_list, []) tiled_texts = [t for t in texts_to_translate for _ in range(self.opt.n_best)] results = flatten_list(predictions) def maybe_item(x): return x.item() if type(x) is torch.Tensor else x scores = [maybe_item(score_tensor) for score_tensor in flatten_list(scores)] results = [self.maybe_detokenize_with_align(result, src) for result, src in zip(results, tiled_texts)] aligns = [align for _, align in results] # build back results with empty texts for i in empty_indices: j = i * 
self.opt.n_best results = (results[:j] + [("", None)] * self.opt.n_best + results[j:]) aligns = aligns[:j] + [None] * self.opt.n_best + aligns[j:] scores = scores[:j] + [0] * self.opt.n_best + scores[j:] rebuilt_segs, scores, aligns = self.rebuild_seg_packages( all_preprocessed, results, scores, aligns, self.opt.n_best) results = [self.maybe_postprocess(seg) for seg in rebuilt_segs] head_spaces = [h for h in head_spaces for i in range(self.opt.n_best)] tail_spaces = [h for h in tail_spaces for i in range(self.opt.n_best)] results = ["".join(items) for items in zip(head_spaces, results, tail_spaces)] self.logger.info("Translation Results: %d", len(results)) return results, scores, self.opt.n_best, timer.times, aligns def rebuild_seg_packages(self, all_preprocessed, results, scores, aligns, n_best): """ Rebuild proper segment packages based on initial n_seg. """ offset = 0 rebuilt_segs = [] avg_scores = [] merged_aligns = [] for i, seg_dict in enumerate(all_preprocessed): sub_results = results[n_best * offset: (offset + seg_dict["n_seg"]) * n_best] sub_scores = scores[n_best * offset: (offset + seg_dict["n_seg"]) * n_best] sub_aligns = aligns[n_best * offset: (offset + seg_dict["n_seg"]) * n_best] for j in range(n_best): _seg_dict = deepcopy(seg_dict) _sub_segs = list(list(zip(*sub_results))[0]) _seg_dict["seg"] = list(islice(_sub_segs, j, None, n_best)) rebuilt_segs.append(_seg_dict) sub_sub_scores = list(islice(sub_scores, j, None, n_best)) avg_scores.append(sum(sub_sub_scores)/_seg_dict["n_seg"]) sub_sub_aligns = list(islice(sub_aligns, j, None, n_best)) merged_aligns.append(sub_sub_aligns) offset += _seg_dict["n_seg"] return rebuilt_segs, avg_scores, merged_aligns def do_timeout(self): """Timeout function that frees GPU memory. Moves the model to CPU or unloads it; depending on attr`self.on_timemout` value """ if self.on_timeout == "unload": self.logger.info("Timeout: unloading model %d" % self.model_id) self.unload() if self.on_timeout == "to_cpu": self.logger.info("Timeout: sending model %d to CPU" % self.model_id) self.to_cpu() @critical def unload(self): self.logger.info("Unloading model %d" % self.model_id) del self.translator if self.opt.cuda: torch.cuda.empty_cache() self.stop_unload_timer() self.unload_timer = None def stop_unload_timer(self): if self.unload_timer is not None: self.unload_timer.cancel() def reset_unload_timer(self): if self.timeout < 0: return self.stop_unload_timer() self.unload_timer = threading.Timer(self.timeout, self.do_timeout) self.unload_timer.start() def to_dict(self): hide_opt = ["models", "src"] d = {"model_id": self.model_id, "opt": {k: self.user_opt[k] for k in self.user_opt.keys() if k not in hide_opt}, "models": self.user_opt["models"], "loaded": self.loaded, "timeout": self.timeout, } if self.tokenizer_opt is not None: d["tokenizer"] = self.tokenizer_opt return d @critical def to_cpu(self): """Move the model to CPU and clear CUDA cache.""" if type(self.translator) == CTranslate2Translator: self.translator.to_cpu() else: self.translator.model.cpu() if self.opt.cuda: torch.cuda.empty_cache() def to_gpu(self): """Move the model to GPU.""" if type(self.translator) == CTranslate2Translator: self.translator.to_gpu() else: torch.cuda.set_device(self.opt.gpu) self.translator.model.cuda() def maybe_preprocess(self, sequence): """Preprocess the sequence (or not) """ if type(sequence) is str: sequence = { "seg": [sequence], "n_seg": 1 } if self.preprocess_opt is not None: return self.preprocess(sequence) return sequence def preprocess(self, sequence): 
"""Preprocess a single sequence. Args: sequence (str): The sequence to preprocess. Returns: sequence (str): The preprocessed sequence. """ if self.preprocessor is None: raise ValueError("No preprocessor loaded") for function in self.preprocessor: sequence = function(sequence) return sequence def maybe_tokenize(self, sequence): """Tokenize the sequence (or not). Same args/returns as `tokenize` """ if self.tokenizer_opt is not None: return self.tokenize(sequence) return sequence def tokenize(self, sequence): """Tokenize a single sequence. Args: sequence (str): The sequence to tokenize. Returns: tok (str): The tokenized sequence. """ if self.tokenizer is None: raise ValueError("No tokenizer loaded") if self.tokenizer_opt["type"] == "sentencepiece": tok = self.tokenizer.EncodeAsPieces(sequence) tok = " ".join(tok) elif self.tokenizer_opt["type"] == "pyonmttok": tok, _ = self.tokenizer.tokenize(sequence) tok = " ".join(tok) return tok @property def tokenizer_marker(self): marker = None tokenizer_type = self.tokenizer_opt.get('type', None) if tokenizer_type == "pyonmttok": params = self.tokenizer_opt.get('params', None) if params is not None: if params.get("joiner_annotate", None) is not None: marker = 'joiner' elif params.get("spacer_annotate", None) is not None: marker = 'spacer' elif tokenizer_type == "sentencepiece": marker = 'spacer' return marker def maybe_detokenize_with_align(self, sequence, src): """De-tokenize (or not) the sequence (with alignment). Args: sequence (str): The sequence to detokenize, possible with alignment seperate by ` ||| `. Returns: sequence (str): The detokenized sequence. align (str): The alignment correspand to detokenized src/tgt sorted or None if no alignment in output. """ align = None if self.opt.report_align: # output contain alignment sequence, align = sequence.split(' ||| ') if align != '': align = self.maybe_convert_align(src, sequence, align) sequence = self.maybe_detokenize(sequence) return (sequence, align) def maybe_detokenize(self, sequence): """De-tokenize the sequence (or not) Same args/returns as :func:`tokenize()` """ if self.tokenizer_opt is not None and ''.join(sequence.split()) != '': return self.detokenize(sequence) return sequence def detokenize(self, sequence): """Detokenize a single sequence Same args/returns as :func:`tokenize()` """ if self.tokenizer is None: raise ValueError("No tokenizer loaded") if self.tokenizer_opt["type"] == "sentencepiece": detok = self.tokenizer.DecodePieces(sequence.split()) elif self.tokenizer_opt["type"] == "pyonmttok": detok = self.tokenizer.detokenize(sequence.split()) return detok def maybe_convert_align(self, src, tgt, align): """Convert alignment to match detokenized src/tgt (or not). Args: src (str): The tokenized source sequence. tgt (str): The tokenized target sequence. align (str): The alignment correspand to src/tgt pair. Returns: align (str): The alignment correspand to detokenized src/tgt. """ if self.tokenizer_marker is not None and ''.join(tgt.split()) != '': return to_word_align(src, tgt, align, mode=self.tokenizer_marker) return align def maybe_postprocess(self, sequence): """Postprocess the sequence (or not) """ if self.postprocess_opt is not None: return self.postprocess(sequence) else: return sequence["seg"][0] def postprocess(self, sequence): """Preprocess a single sequence. Args: sequence (str): The sequence to process. Returns: sequence (str): The postprocessed sequence. 
""" if self.postprocessor is None: raise ValueError("No postprocessor loaded") for function in self.postprocessor: sequence = function(sequence) return sequence def get_function_by_path(path, args=[], kwargs={}): module_name = ".".join(path.split(".")[:-1]) function_name = path.split(".")[-1] try: module = importlib.import_module(module_name) except ValueError as e: print("Cannot import module '%s'" % module_name) raise e function = getattr(module, function_name) return function
the-stack_106_30830
# coding: utf-8

import re
import six

from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization


class NovaServerFault:

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    sensitive_list = []

    openapi_types = {
        'code': 'int',
        'created': 'str',
        'message': 'str',
        'details': 'str'
    }

    attribute_map = {
        'code': 'code',
        'created': 'created',
        'message': 'message',
        'details': 'details'
    }

    def __init__(self, code=None, created=None, message=None, details=None):
        """NovaServerFault - a model defined in huaweicloud sdk"""

        self._code = None
        self._created = None
        self._message = None
        self._details = None
        self.discriminator = None

        if code is not None:
            self.code = code
        if created is not None:
            self.created = created
        if message is not None:
            self.message = message
        if details is not None:
            self.details = details

    @property
    def code(self):
        """Gets the code of this NovaServerFault.

        Error code.

        :return: The code of this NovaServerFault.
        :rtype: int
        """
        return self._code

    @code.setter
    def code(self, code):
        """Sets the code of this NovaServerFault.

        Error code.

        :param code: The code of this NovaServerFault.
        :type: int
        """
        self._code = code

    @property
    def created(self):
        """Gets the created of this NovaServerFault.

        Time when the fault occurred.

        :return: The created of this NovaServerFault.
        :rtype: str
        """
        return self._created

    @created.setter
    def created(self, created):
        """Sets the created of this NovaServerFault.

        Time when the fault occurred.

        :param created: The created of this NovaServerFault.
        :type: str
        """
        self._created = created

    @property
    def message(self):
        """Gets the message of this NovaServerFault.

        Fault description.

        :return: The message of this NovaServerFault.
        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Sets the message of this NovaServerFault.

        Fault description.

        :param message: The message of this NovaServerFault.
        :type: str
        """
        self._message = message

    @property
    def details(self):
        """Gets the details of this NovaServerFault.

        Fault details.

        :return: The details of this NovaServerFault.
        :rtype: str
        """
        return self._details

    @details.setter
    def details(self, details):
        """Sets the details of this NovaServerFault.

        Fault details.

        :param details: The details of this NovaServerFault.
:type: str """ self._details = details def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: if attr in self.sensitive_list: result[attr] = "****" else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" import simplejson as json if six.PY2: import sys reload(sys) sys.setdefaultencoding("utf-8") return json.dumps(sanitize_for_serialization(self), ensure_ascii=False) def __repr__(self): """For `print`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, NovaServerFault): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
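
# --- Illustrative usage sketch, not part of the generated SDK module ---
# The field values below are invented examples.
#
#   fault = NovaServerFault(code=500, created="2021-01-01T00:00:00Z",
#                           message="host error", details="compute node lost")
#   print(fault.to_dict())   # {'code': 500, 'created': '2021-01-01T00:00:00Z', ...}
#   print(fault)             # JSON representation via to_str()/__repr__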
the-stack_106_30831
import six import colorama # import this name for other promptly modules # to use from colorama import Style as AnsiStyle colorama.init() class Style(object): reset_all = colorama.Style.RESET_ALL @classmethod def styles_for_key(cls, key, stylesheet): styles = {} context = stylesheet try: styles.update(context['selectors']['body']['value']) except KeyError: pass for part in key.split('.'): try: context = context['selectors'][part] styles.update(context['value']) except KeyError: break return cls(styles) def __init__(self, data): self.data = data @property def color(self): try: return getattr(colorama.Fore, self.data['color'].upper()) except (KeyError, AttributeError): return '' @property def background_color(self): try: return getattr(colorama.Back, self.data['background_color'].upper()) except (KeyError, AttributeError): return '' @property def font_weight(self): try: font_weight = self.data['font_weight'] except KeyError: return '' choices = {'normal': colorama.Style.NORMAL, 'bold': colorama.Style.BRIGHT, 'lighter': colorama.Style.DIM} return choices.get(font_weight, colorama.Style.NORMAL) def __call__(self, value): # Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET. # Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET. # Style: DIM, NORMAL, BRIGHT, RESET_ALL return self.reset_all + self.color + \ self.background_color + \ self.font_weight + \ value + \ self.reset_all class CSSTokens(object): SELECTOR = 'SELECTOR' BODY_START = 'BODY_START' BODY_END = 'BODY_END' PROPERTY = 'PROPERTY' PROPERTY_VALUE = 'PROPERTY_VALUE' class CSSParser(object): @staticmethod def parse_string(value): parser = CSSParser() return parser.parse(value) def parse(self, value): if six.PY3: value = value.decode('utf-8') sheet = {'selectors':{}} context = sheet property_name = None for token, value in self.tokenize(value): if token == CSSTokens.SELECTOR: data = self.format(value) if data: context = context.setdefault('selectors', {}) context = context.setdefault(data, {'value': {}}) elif token == CSSTokens.BODY_START: data = self.format(value) if data: context = context.setdefault('selectors', {}) context = context.setdefault(data, {'value': {}}) elif token == CSSTokens.PROPERTY: property_name = self.format(value) elif token == CSSTokens.PROPERTY_VALUE: context['value'][property_name] = self.format(value) property_name = None elif token == CSSTokens.BODY_END: data = self.format(value) if data: context['value'][property_name] = data context = sheet return sheet def format(self, value): return value \ .strip() \ .replace('-', '_') \ .lower() def tokenize(self, value): buf = '' for char in value: if char == '.': yield CSSTokens.SELECTOR, buf buf = '' elif char == '{': yield CSSTokens.BODY_START, buf buf = '' elif char == '}': yield CSSTokens.BODY_END, buf buf = '' elif char == ':': yield CSSTokens.PROPERTY, buf buf = '' elif char == ';': yield CSSTokens.PROPERTY_VALUE, buf buf = '' else: buf = buf + str(char)
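
# --- Illustrative usage sketch, not part of the original module ---
# The selector name and CSS text are hypothetical. Note that CSSParser.parse()
# calls value.decode('utf-8') on Python 3, so it expects a bytes object there.
#
#   css = b".prompt { color: cyan; font-weight: bold; }"
#   sheet = CSSParser.parse_string(css)
#   style = Style.styles_for_key("prompt", sheet)
#   print(style("Enter a value: "))   # text wrapped in ANSI colour/style codes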
the-stack_106_30833
# Write a program that converts seconds into hours, minutes and seconds.
# Example run: input: 3850
#              output: 1h 4min 10s
time = int(input('Enter the time in seconds: '))
hours = time // 3600
seconds_left = time - (hours*3600)
minutes = seconds_left // 60
seconds_left -= (minutes*60)
print(f'Converted value: {hours} h {minutes} min {seconds_left} s')
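
# A possible alternative, added for illustration only: the same conversion written
# with divmod, using the value read above (e.g. divmod(3850, 3600) == (1, 250) and
# divmod(250, 60) == (4, 10)).
#
#   hours, rest = divmod(time, 3600)
#   minutes, seconds = divmod(rest, 60)
#   print(f'Converted value: {hours} h {minutes} min {seconds} s')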
the-stack_106_30834
# -*- coding: utf-8 -*-

from copy import copy

from json_writer import JsonWriter
import util

# This Writer currently generates a Java-specific JSON format, not Java code.
# JSON format:
# The top level is a dict containing two elements: header and body.
# header has two rows:
#   the first row holds the header texts
#   the second row holds the column names
# body is a 2D array:
#   its entries correspond to the cells of the Excel sheet
class JavaWriter(JsonWriter):

    def begin_write(self):
        super(JavaWriter, self).begin_write()

        module_info = self.data_module.info

        parser_name = module_info["parser"].split('.')[-1]
        class_name = util.to_class_name(parser_name)
        self.write_value("class", class_name)

        self.is_multi_key = module_info["multi_key"]
        self.write_value("multiKey", self.is_multi_key)

        sheet_types = module_info["sheet_types"]["main_sheet"]
        fields = sheet_types.keys()
        fields.sort()
        self.fields = fields

        texts = [sheet_types[field][2] for field in fields]
        headers = [texts, fields, ]
        self.write_value("header", headers, 2)

    def write_sheet(self, name, sheet):
        if name != "main_sheet": return

        key_field = self.fields[0]

        body = []
        keys = sheet.keys()
        keys.sort()

        for k in keys:
            row = sheet[k]
            new_row = None
            if isinstance(row, list):
                new_row = []
                for sub_row in row:
                    r = copy(sub_row)
                    r[key_field] = k
                    new_row.append(r)
            else:
                new_row = copy(row)
                new_row[key_field] = k
            body.append(new_row)

        self.write_value("body", body, 3 if self.is_multi_key else 2)

    def write_module(self, module):
        pass
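
# Illustrative sketch (added) of the JSON shape described by the comments at the
# top of this file; the class name, columns and rows are invented, and the exact
# envelope depends on how JsonWriter.write_value serialises these values.
#
#   {
#       "class":    "ItemConfig",
#       "multiKey": false,
#       "header":   [["ID", "Name"], ["id", "name"]],
#       "body":     [{"id": 1, "name": "sword"}, {"id": 2, "name": "shield"}]
#   }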
the-stack_106_30836
import os # Includes the parent directory into sys.path, to make imports work import os.path, sys sys.path.append( os.path.join( os.path.dirname( os.path.realpath(__file__) ), os.pardir ) ) from constants import ( FILENAMES_OF_QUERIES, PLSH_INDEX, NLSH_INDEX, QUERIES_PATH ) from loader import _read_dataset_names def build_commands(): commands = [] queries_list = _read_dataset_names( path=FILENAMES_OF_QUERIES, audio_path=QUERIES_PATH ) for index_type in [NLSH_INDEX, PLSH_INDEX]: for query_name in queries_list[:100]: cmd = " ".join([ f"python main.py search -i {index_type} -f {query_name}", "--num_permutations 1000" ]) commands.append(cmd) return commands def execute_commands(): commands = build_commands() for c in commands: os.system(c) if __name__ == "__main__": execute_commands()
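
# A possible alternative to os.system, shown for illustration only: running the
# same commands through subprocess so that a failing search raises instead of
# being silently ignored.
#
#   import shlex
#   import subprocess
#
#   for c in build_commands():
#       subprocess.run(shlex.split(c), check=True)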
the-stack_106_30837
import os.path __all__ = [ "__title__", "__summary__", "__uri__", "__version__", "__commit__", "__author__", "__email__", "__license__", "__copyright__", ] try: base_dir = os.path.dirname(os.path.abspath(__file__)) except NameError: base_dir = None __title__ = "ipfreely" __summary__ = "ipfreely emails you when your dynamic IP address changes" __uri__ = "https://github.com/NickleDave/ipfreely" __version__ = "0.1.0" if base_dir is not None and os.path.exists(os.path.join(base_dir, ".commit")): with open(os.path.join(base_dir, ".commit")) as fp: __commit__ = fp.read().strip() else: __commit__ = None __author__ = "David Nicholson" __email__ = "[email protected]" __license__ = "BSD" __copyright__ = "2020 %s" % __author__
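
# Illustrative note (added): modules like this are usually consumed by re-importing
# the dunder names elsewhere in the package; the import path below is an assumption.
#
#   from ipfreely.__about__ import __title__, __version__
#   print(__title__, __version__)   # ipfreely 0.1.0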
the-stack_106_30839
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons from program_config import TensorConfig, ProgramConfig import numpy as np import paddle.inference as paddle_infer from functools import partial from typing import Optional, List, Callable, Dict, Any, Set import unittest class TrtConvertStridedSliceTest(TrtLayerAutoScanTest): def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) ] return True def sample_program_configs(self): def generate_input1(attrs: List[Dict[str, Any]]): return np.ones([1, 56, 56, 192]).astype(np.float32) for axes in [[1, 2]]: for starts in [[1, 1]]: for ends in [[10000000, 10000000]]: for decrease_axis in [[]]: for infer_flags in [[1, 1]]: for strides in [[2, 2]]: dics = [{ "axes": axes, "starts": starts, "ends": ends, "decrease_axis": decrease_axis, "infer_flags": infer_flags, "strides": strides }] ops_config = [{ "op_type": "strided_slice", "op_inputs": { "Input": ["input_data"] }, "op_outputs": { "Out": ["slice_output_data"] }, "op_attrs": dics[0] }] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ "input_data": TensorConfig( data_gen=partial(generate_input1, dics)) }, outputs=["slice_output_data"]) yield program_config def sample_predictor_configs( self, program_config) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input_data": [1, 56, 56, 192] } self.dynamic_shape.max_input_shape = { "input_data": [8, 56, 56, 192] } self.dynamic_shape.opt_input_shape = { "input_data": [4, 56, 56, 192] } def clear_dynamic_shape(): self.dynamic_shape.min_input_shape = {} self.dynamic_shape.max_input_shape = {} self.dynamic_shape.opt_input_shape = {} def generate_trt_nodes_num(attrs, dynamic_shape): inputs = program_config.inputs if dynamic_shape: for i in range(len(attrs[0]["starts"])): if attrs[0]["starts"][i] < 0 or attrs[0]["ends"][i] < 0: return 0, 3 if not dynamic_shape: for x in attrs[0]["axes"]: if x == 0: return 0, 3 return 1, 2 attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) ] # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num(attrs, True), 1e-5 def test(self): self.run_test()
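
# Illustrative note (added, not part of the original test): for the single
# attribute combination sampled above, strided_slice should match plain numpy
# slicing on axes 1 and 2 with start 1, an effectively unbounded end and stride 2:
#
#   import numpy as np
#   x = np.ones([1, 56, 56, 192]).astype(np.float32)
#   y = x[:, 1::2, 1::2, :]     # axes=[1, 2], starts=[1, 1], strides=[2, 2]
#   assert y.shape == (1, 28, 28, 192)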
the-stack_106_30840
"""distutils.command.install_headers Implements the Distutils 'install_headers' command, to install C/C++ header files to the Python include directory.""" # created 2000/05/26, Greg Ward __revision__ = "$Id: install_headers.py,v 1.7 2000/09/30 17:34:50 gward Exp $" import os from distutils.core import Command class install_headers (Command): description = "install C/C++ header files" user_options = [('install-dir=', 'd', "directory to install header files to"), ('force', 'f', "force installation (overwrite existing files)"), ] boolean_options = ['force'] def initialize_options (self): self.install_dir = None self.force = 0 self.outfiles = [] def finalize_options (self): self.set_undefined_options('install', ('install_headers', 'install_dir'), ('force', 'force')) def run (self): headers = self.distribution.headers if not headers: return self.mkpath(self.install_dir) for header in headers: (out, _) = self.copy_file(header, self.install_dir) self.outfiles.append(out) def get_inputs (self): return self.distribution.headers or [] def get_outputs (self): return self.outfiles # class install_headers
the-stack_106_30841
import dash, flask, os import dash_core_components as dcc import dash_html_components as html import plotly.graph_objs as go import pandas as pd import pickle server = flask.Flask(__name__) server.secret_key = os.environ.get('secret_key', 'secret') app = dash.Dash(name = __name__, server = server) app.config.supress_callback_exceptions = True #relies on pickle file here, might want to change to JSON dic = pickle.load( open( './data/WRF_extract_GFDL_1970-2100_multiloc_dod.p', "rb" ) ) #the truth has been sent by email to Nancy and contains only Greely df_greely_historical = pd.read_csv('./data/truth.csv',index_col=0) df_greely_historical.index = pd.to_datetime( df_greely_historical.index ) app.css.append_css({'external_url': 'https://cdn.rawgit.com/plotly/dash-app-stylesheets/2d266c578d2a6e8850ebce48fdb52759b2aef506/stylesheet-oil-and-gas.css'}) app.layout = html.Div([ html.Div( [ html.H1( 'WRF Temperature Exploration for DOD project', className='eight columns', ), html.Img( src="https://uaf-snap.org/wp-content/uploads/2020/06/SNAP_color_all.svg", className='one columns', style={ 'height': '80', 'width': '225', 'float': 'right', 'position': 'relative', }, ), ], className='row' ), html.Div([ html.Div([ dcc.Dropdown( id='nb_days', options=[{'label': 'Consecutive days : {}'.format(i), 'value': i} for i in range(1,10)], #consecutive days selection value=2 ), ],className='three columns'), html.Div([ dcc.Dropdown( id='temperature', options=[{'label': 'Temperature below : {} celsius'.format(i), 'value': i} for i in range(0,-40, -5)], # temperature threshold selection value=0 ) ],className='three columns'), html.Div([ dcc.Dropdown( id='location', options=[{'label': 'Location: {}'.format(i), 'value': i} for i in dic.keys() ], #location extracted from pickle file value='Greely' ) ],className='three columns'), ]), html.Div([ dcc.Graph(id='indicator-graphic'), ],className='eleven columns') ],className='ten columns offset-by-one') @app.callback( dash.dependencies.Output('indicator-graphic', 'figure'), [dash.dependencies.Input('nb_days', 'value'), dash.dependencies.Input('temperature', 'value'), dash.dependencies.Input('location', 'value')]) def update_graph(nb_days, temperature, location): def rolling_count_serie(serie , temperature , nb_days): '''This function is a non rolling window method, value 1 for number of days obviously doesn't work but it is okay for this purpose. 
Non rolling window was request by the group.''' ct = 0 ls = [] for i in serie : if i <= temperature and ct < nb_days : ct +=1 elif ct == nb_days : ct = 1 else : ct = 0 ls.append(ct) return ls #Dealing with the actual WRF outputs df = dic[ location ].copy() df.index = pd.to_datetime( df.index ) df['count'] = rolling_count_serie( df['max'], temperature , int( nb_days )) dff = df[ df['count'] == int(nb_days) ] dff = dff.groupby( dff.index.year ).count() #Dealing with historical CSV file df_greely_historical['count'] = rolling_count_serie( df_greely_historical[ 'max' ], temperature , int( nb_days )) df_hist = df_greely_historical[ df_greely_historical[ 'count' ] == int( nb_days ) ] df_hist = df_hist.groupby( df_hist.index.year ).count() df_hist = df_hist.loc[1970:] #we just have historical data for Greely so we only display if Greely is selected if location == 'Greely' : return { 'data': [go.Bar( x = dff.index, y = dff['count'], name = 'WRF modeled' ), go.Scatter( x = dff.index, y = df_hist['count'], mode = 'markers', name = 'Greely historical' )], 'layout': go.Layout( xaxis=dict( title = 'Years', range = [1969,2101], #there was some axes issues so hard coded ), yaxis={ 'title': 'Number of occurences', }, margin={'l': 40, 'b': 40, 't': 10, 'r': 0}, hovermode='closest' ) } else : #without Greely just display bars showing amount of days return { 'data': [go.Bar( x=dff.index, y=dff['count'], name = 'WRF modeled' )], 'layout': go.Layout( xaxis={ 'title': 'Years', 'autorange':True, }, yaxis={ 'title': 'Number of occurences', }, margin={'l': 40, 'b': 40, 't': 10, 'r': 0}, hovermode='closest' ) } if __name__ == '__main__': app.server.run()
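

# Illustrative sketch (invented numbers, independent of Dash): the callback above
# counts non-overlapping runs of `nb_days` consecutive values at or below
# `temperature`.  The helper below repeats that counting rule on a tiny hand-made
# series so the behaviour can be checked without loading the pickle/CSV inputs.
def _demo_rolling_count(temperature=-5, nb_days=2):
    series = [-6, -7, 0, 0, -8, -9, 3]      # made-up daily maxima in celsius
    ct, flags = 0, []
    for value in series:
        if value <= temperature and ct < nb_days:
            ct += 1
        elif ct == nb_days:
            ct = 1
        else:
            ct = 0
        flags.append(ct)
    # flags == [1, 2, 1, 0, 1, 2, 1]; every position equal to nb_days marks the
    # end of one qualifying cold spell (two spells in this example).
    return [i for i, flag in enumerate(flags) if flag == nb_days]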
the-stack_106_30844
import translator_bot
import os

"""
This script is invoked from translate-strings.sh, which is intended to be run as part of a GitHub Action.

If you want to run the tool on the command line, use translator_bot.py instead.
"""


def show_required_field_error(required_field: str):
    print('%s is a required field.' % required_field)
    print('Please confirm your Workflow YML is set up as described at https://github.com/LeiaInc/TranslatorBot#usage.')


if __name__ == "__main__":
    api_key = os.getenv('INPUT_TRANSLATIONKEY')
    if not api_key:
        show_required_field_error('translationKey')
        exit()

    output_languages = os.getenv('INPUT_OUTPUTLANGUAGES')
    if not output_languages:
        show_required_field_error('outputLanguages')
        exit()

    res_directories = os.getenv('INPUT_RESDIRECTORIES')
    if not res_directories:
        show_required_field_error('resDirectories')
        exit()

    output_language_list = output_languages.split(',')
    for res_dir in res_directories.split(','):
        translator_bot.translate_res_dir(api_key, output_language_list, res_dir)
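

# Illustrative sketch (hypothetical helper, not used by the action itself):
# GitHub Actions publishes the declared workflow inputs as INPUT_* environment
# variables, which is everything the __main__ block above reads.  Exporting the
# same variables by hand allows a local dry run of this wrapper outside of CI;
# the argument values a caller passes in are placeholders.
def _set_local_action_env(api_key, output_languages, res_directories):
    os.environ['INPUT_TRANSLATIONKEY'] = api_key
    os.environ['INPUT_OUTPUTLANGUAGES'] = ','.join(output_languages)
    os.environ['INPUT_RESDIRECTORIES'] = ','.join(res_directories)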
the-stack_106_30846
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
A mock implementation of a key manager. This module should NOT be used for
anything but integration testing.
"""

import array
import uuid

from nova import exception
from nova.keymgr import key
from nova.keymgr import key_mgr
from nova.openstack.common import log as logging
from nova import utils

LOG = logging.getLogger(__name__)


class MockKeyManager(key_mgr.KeyManager):
    """
    This mock key manager implementation supports all the methods specified
    by the key manager interface. A fresh random key is generated for every
    invocation of create_key and all keys are held in an in-memory dictionary.
    Side effects (e.g., raising exceptions) for each method are handled as
    specified by the key manager interface.

    This class should NOT be used for anything but integration testing
    because keys are not stored persistently.
    """

    def __init__(self):
        self.keys = {}

    def create_key(self, ctxt, **kwargs):
        """Creates a symmetric AES key and stores it.

        This implementation returns a UUID for the created key. A
        NotAuthorized exception is raised if the specified context is None.
        """
        if ctxt is None:
            raise exception.NotAuthorized()

        # generate the key
        key_length = kwargs.get('key_length', 256)
        # hex digit => 4 bits
        hex_string = utils.generate_password(length=key_length / 4,
                                             symbolgroups='0123456789ABCDEF')

        _bytes = array.array('B', hex_string.decode('hex')).tolist()
        _key = key.SymmetricKey('AES', _bytes)

        return self.store_key(ctxt, _key)

    def store_key(self, ctxt, key, **kwargs):
        """Stores (i.e., registers) a key with the key manager.

        This implementation keeps the key in an in-memory dictionary and
        returns the UUID under which it was registered. A NotAuthorized
        exception is raised if the specified context is None.
        """
        if ctxt is None:
            raise exception.NotAuthorized()

        # generate UUID and ensure that it isn't in use
        key_id = uuid.uuid4()
        while key_id in self.keys:
            key_id = uuid.uuid4()

        self.keys[key_id] = key

        return key_id

    def get_key(self, ctxt, key_id, **kwargs):
        """Retrieves the key identified by the specified id.

        This implementation returns the key that was previously stored under
        the given UUID. A NotAuthorized exception is raised if the specified
        context is None; a KeyError is raised if the UUID is invalid.
        """
        if ctxt is None:
            raise exception.NotAuthorized()

        return self.keys[key_id]

    def delete_key(self, ctxt, key_id, **kwargs):
        """Deletes the key identified by the specified id.

        This implementation removes the key from the in-memory dictionary.
        A NotAuthorized exception is raised if the context is None and a
        KeyError is raised if the UUID is invalid.
        """
        if ctxt is None:
            raise exception.NotAuthorized()

        del self.keys[key_id]
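

# Illustrative usage sketch (integration-testing only, in line with the module
# docstring; running it requires the nova package imported above).  Any non-None
# object passes the context check, so a bare object() is enough to walk through
# the create/get/delete round trip.
if __name__ == '__main__':
    manager = MockKeyManager()
    ctxt = object()                            # any non-None context is accepted
    key_id = manager.create_key(ctxt, key_length=256)
    fetched = manager.get_key(ctxt, key_id)    # the SymmetricKey stored above
    manager.delete_key(ctxt, key_id)           # a later get_key raises KeyError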
the-stack_106_30847
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from uuid import uuid4 import create_batch_prediction_job_tabular_forecasting_sample import pytest import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" MODEL_ID = "8531330622239539200" # Permanent restaurant rating model DISPLAY_NAME = f"temp_create_batch_prediction_tabular_forecasting_test_{uuid4()}" GCS_SOURCE_URI = "gs://cloud-samples-data/ai-platform/covid/bigquery-public-covid-nyt-us-counties-train.csv" GCS_OUTPUT_URI = "gs://ucaip-samples-test-output/" PREDICTIONS_FORMAT = "csv" @pytest.fixture(scope="function", autouse=True) def teardown(teardown_batch_prediction_job): yield @pytest.mark.skip(reason="https://github.com/googleapis/java-aiplatform/issues/420") # Creating AutoML Tabular Forecasting Classification batch prediction job def test_create_batch_prediction_job_tabular_forecasting_sample(capsys, shared_state): model_name = f"projects/{PROJECT_ID}/locations/{LOCATION}/models/{MODEL_ID}" create_batch_prediction_job_tabular_forecasting_sample.create_batch_prediction_job_tabular_forecasting_sample( project=PROJECT_ID, display_name=DISPLAY_NAME, model_name=model_name, gcs_source_uri=GCS_SOURCE_URI, gcs_destination_output_uri_prefix=GCS_OUTPUT_URI, predictions_format=PREDICTIONS_FORMAT, ) out, _ = capsys.readouterr() # Save resource name of the newly created batch prediction job shared_state["batch_prediction_job_name"] = helpers.get_name(out)
the-stack_106_30849
# qubit number=3 # total number=11 import numpy as np from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ import networkx as nx from qiskit.visualization import plot_histogram from typing import * from pprint import pprint from math import log2 from collections import Counter from qiskit.test.mock import FakeVigo, FakeYorktown kernel = 'circuit/bernstein' def make_circuit(n:int) -> QuantumCircuit: # circuit begin input_qubit = QuantumRegister(n,"qc") prog = QuantumCircuit(input_qubit) prog.h(input_qubit[0]) # number=1 prog.h(input_qubit[1]) # number=2 prog.rx(2.9845130209103035,input_qubit[2]) # number=7 prog.h(input_qubit[2]) # number=3 prog.x(input_qubit[2]) # number=6 prog.rx(1.6807520696705391,input_qubit[3]) # number=8 prog.h(input_qubit[3]) # number=4 prog.y(input_qubit[3]) # number=5 for edge in E: k = edge[0] l = edge[1] prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1]) prog.p(gamma, k) prog.p(gamma, l) prog.rx(2 * beta, range(len(V))) prog.x(input_qubit[1]) # number=9 prog.x(input_qubit[1]) # number=10 # circuit end return prog if __name__ == '__main__': n = 4 V = np.arange(0, n, 1) E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)] G = nx.Graph() G.add_nodes_from(V) G.add_weighted_edges_from(E) step_size = 0.1 a_gamma = np.arange(0, np.pi, step_size) a_beta = np.arange(0, np.pi, step_size) a_gamma, a_beta = np.meshgrid(a_gamma, a_beta) F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * ( 1 + np.cos(4 * a_gamma) ** 2) result = np.where(F1 == np.amax(F1)) a = list(zip(result[0], result[1]))[0] gamma = a[0] * step_size beta = a[1] * step_size prog = make_circuit(4) sample_shot =3962 writefile = open("../data/startQiskit_Class238.csv", "w") # prog.draw('mpl', filename=(kernel + '.png')) backend = BasicAer.get_backend('statevector_simulator') circuit1 = transpile(prog, FakeYorktown()) prog = circuit1 info = execute(prog,backend=backend, shots=sample_shot).result().get_counts() print(info, file=writefile) print("results end", file=writefile) print(circuit1.depth(), file=writefile) print(circuit1, file=writefile) writefile.close()
the-stack_106_30857
import os
from os.path import join
import tensorflow as tf
from tensorflow.python.framework import ops

current_dir = os.path.dirname(__file__)
build_dir = join(current_dir, '../build')

deform_conv2d_op_exe = tf.load_op_library(join(build_dir, 'deform_conv_op.so'))
deform_conv2d_grad_op_exe = tf.load_op_library(join(build_dir, 'deform_conv_grad_op.so'))


def deform_conv2d(x, filter, offset, strides, rates, padding, deform_group, data_format):
    '''
    :param x: Input tensor, a 4-d tf_tensor in "NCHW" order;
    :param filter: Convolution kernel, a 4-d tf_tensor, the order should be '[output_channel, input_channel, kernel_height, kernel_width]';
    :param offset: A 4-d tf_tensor, offsets of the convolution sampling location, the order should be '[batch_size, offset_dim(deform_group * kernel_height * kernel_width * 2), output_height, output_width]';
    :param strides: stride, a list of length 4 in "NCHW" order;
    :param rates: rate (dilation), a list of length 4 in "NCHW" order;
    :param padding: padding method, string. Can either be "SAME" or "VALID";
    :param deform_group: a non-zero int, indicating how many different groups of offsets are applied, default 1;
    :param data_format: string, only "NCHW" is supported for now.
    :return: Output tensor, a 4-d tf_tensor in "NCHW" order.
    '''
    return deform_conv2d_op_exe.deform_conv2d_op(x=x, filter=filter, offset=offset, strides=strides, rates=rates,
                                                 padding=padding, deform_group=deform_group, data_format=data_format)


@ops.RegisterGradient("DeformConv2dOp")
def _deform_conv2d_grad_op(op, grad):
    input_tensor = op.inputs[0]
    filter_tensor = op.inputs[1]
    offset_tensor = op.inputs[2]
    strides = op.get_attr('strides')
    rates = op.get_attr('rates')
    padding = op.get_attr('padding')
    deform_group = op.get_attr('deform_group')
    data_format = op.get_attr('data_format')

    data_grad = deform_conv2d_grad_op_exe.deform_conv2d_grad_op(x=input_tensor, filter=filter_tensor,
                                                                offset=offset_tensor, out_grad=grad,
                                                                strides=strides, rates=rates, padding=padding,
                                                                deform_group=deform_group, data_format=data_format)
    return data_grad
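

# Illustrative usage sketch (assumes the custom .so files above were built and a
# TF1-style graph API with tf.placeholder is available; every size below is an
# invented example value).  With a 3x3 kernel and `deform_group` offset groups,
# the offset tensor must carry deform_group * 3 * 3 * 2 channels at the output
# resolution, matching the shape convention described in the docstring above.
def _example_deform_conv2d():
    batch, in_channels, height, width = 1, 8, 32, 32
    out_channels, kernel_h, kernel_w, deform_group = 16, 3, 3, 1
    x = tf.placeholder(tf.float32, [batch, in_channels, height, width])   # NCHW input
    weights = tf.placeholder(tf.float32, [out_channels, in_channels, kernel_h, kernel_w])
    offset = tf.placeholder(tf.float32,
                            [batch, deform_group * kernel_h * kernel_w * 2, height, width])
    # With "SAME" padding and unit stride the output keeps the input resolution,
    # so the offset tensor above already has the required spatial size.
    return deform_conv2d(x, weights, offset,
                         strides=[1, 1, 1, 1], rates=[1, 1, 1, 1],
                         padding="SAME", deform_group=deform_group,
                         data_format="NCHW")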
the-stack_106_30858
import numpy as np class ExperienceBuffer(): #Store the data from the episodes def __init__(self): self.num_episodes = 0 self.num_experiences = 0 self.states_buffer = [] self.actions_buffer = [] self.rewards_buffer = [] self.safety_costs_buffer = [] self.next_states_buffer = [] self.discounted_future_rewards_buffer = [] self.discounted_future_safety_costs_buffer = [] self.advantages_buffer = [] self.safety_advantages_buffer = [] self.total_episode_reward = [] self.total_episode_cost = [] self.episode_length = 0 self.episode_states_buffer = [] self.episode_actions_buffer = [] self.episode_rewards_buffer = [] self.episode_safety_costs_buffer = [] self.episode_next_states_buffer = [] self.episode_discounted_future_rewards_buffer = [] self.episode_discounted_future_safety_costs_buffer = [] def get_experiences(self): return np.vstack(self.states_buffer), np.asarray(self.actions_buffer), np.asarray(self.rewards_buffer), np.asarray(self.safety_costs_buffer), np.vstack(self.next_states_buffer), np.asarray(self.discounted_future_rewards_buffer), np.asarray(self.discounted_future_safety_costs_buffer) def get_episode_experiences(self): return np.vstack(self.episode_states_buffer), np.asarray(self.episode_actions_buffer), np.asarray(self.episode_rewards_buffer), np.asarray(self.episode_safety_costs_buffer), np.vstack(self.episode_next_states_buffer), np.asarray(self.episode_discounted_future_rewards_buffer), np.asarray(self.episode_discounted_future_safety_costs_buffer) def get_advantages(self): return np.asarray(self.advantages_buffer), np.asarray(self.safety_advantages_buffer) def get_number_of_experiences(self): return self.num_experiences def get_number_of_episodes(self): return self.num_episodes def get_discounted_future_returns(self, returns, discount_factor): discounted_future_returns = [0]*len(returns) r = 0 for t in reversed(range(len(returns))): r = returns[t] + discount_factor * r discounted_future_returns[t] = r return discounted_future_returns def add_experience(self, state, action, reward, safety_cost, next_state): self.episode_states_buffer.append(state) self.episode_actions_buffer.append(action) self.episode_rewards_buffer.append(reward) self.episode_safety_costs_buffer.append(safety_cost) self.episode_next_states_buffer.append(next_state) self.episode_length += 1 def add_advantages(self, advantages, safety_advantages): self.advantages_buffer += list(advantages) self.safety_advantages_buffer += list(safety_advantages) def clear_episode_buffer(self): self.episode_length = 0 self.episode_states_buffer = [] self.episode_actions_buffer = [] self.episode_rewards_buffer = [] self.episode_safety_costs_buffer = [] self.episode_next_states_buffer = [] self.episode_discounted_future_rewards_buffer = [] self.episode_discounted_future_safety_costs_buffer = [] def add_episode(self, reward_discount_factor, safety_discount_factor): self.episode_discounted_future_rewards_buffer = self.get_discounted_future_returns(self.episode_rewards_buffer, reward_discount_factor) self.episode_discounted_future_safety_costs_buffer = self.get_discounted_future_returns(self.episode_safety_costs_buffer, safety_discount_factor) self.states_buffer += self.episode_states_buffer self.actions_buffer += self.episode_actions_buffer self.rewards_buffer += self.episode_rewards_buffer self.safety_costs_buffer += self.episode_safety_costs_buffer self.next_states_buffer += self.episode_next_states_buffer self.discounted_future_rewards_buffer += self.episode_discounted_future_rewards_buffer 
self.discounted_future_safety_costs_buffer += self.episode_discounted_future_safety_costs_buffer self.num_episodes += 1 self.num_experiences += self.episode_length def clear_buffer(self): self.num_episodes = 0 self.num_experiences = 0 self.states_buffer = [] self.actions_buffer = [] self.rewards_buffer = [] self.safety_costs_buffer = [] self.next_states_buffer = [] self.discounted_future_rewards_buffer = [] self.discounted_future_safety_costs_buffer = [] self.advantages_buffer = [] self.safety_advantages_buffer = []
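

# Illustrative usage sketch (made-up transitions): one episode is accumulated
# step by step with add_experience(), folded into the main buffers by
# add_episode(), which also computes the discounted future returns, and then
# read back with get_experiences().
if __name__ == "__main__":
    buf = ExperienceBuffer()
    state = np.zeros(4)
    for step in range(3):
        next_state = np.ones(4) * (step + 1)          # invented next state
        buf.add_experience(state, action=0, reward=1.0,
                           safety_cost=0.0, next_state=next_state)
        state = next_state
    buf.add_episode(reward_discount_factor=0.99, safety_discount_factor=0.99)
    buf.clear_episode_buffer()
    states, actions, rewards, costs, next_states, returns, safety_returns = \
        buf.get_experiences()
    # returns[0] == 1 + 0.99 + 0.99**2 for the three identical rewards above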
the-stack_106_30859
from datetime import datetime, timedelta from django.test import TestCase from casexml.apps.case.tests import check_xml_line_by_line from corehq.apps.mobile_auth.utils import new_key_record, get_mobile_auth_payload from dimagi.ext.jsonobject import HISTORICAL_DATETIME_FORMAT class MobileAuthTest(TestCase): @staticmethod def format_datetime_no_usec(dt): # phone handler can't deal with microseconds return dt.strftime(HISTORICAL_DATETIME_FORMAT) def test_xml(self): now = datetime.utcnow() domain = 'test' now_plus_30 = now + timedelta(days=30) now_minus_30 = now - timedelta(days=30) record = new_key_record(None, None, now=now) xml = get_mobile_auth_payload([record], domain, now=now) check_xml_line_by_line(self, xml, """ <OpenRosaResponse xmlns="http://openrosa.org/http/response"> <message nature="submit_success">Here are your keys!</message> <auth_keys domain="{domain}" issued="{now}"> <key_record valid="{now}" expires="{now_plus_30}"> <uuid>{record.uuid}</uuid> <key type="{record.type}">{record.key}</key> </key_record> </auth_keys> </OpenRosaResponse> """.format( now=self.format_datetime_no_usec(now), now_plus_30=self.format_datetime_no_usec(now_plus_30), record=record, domain=domain, )) record = new_key_record(None, None, now=now, valid=now_minus_30) xml = get_mobile_auth_payload([record], domain, now=now) check_xml_line_by_line(self, xml, """ <OpenRosaResponse xmlns="http://openrosa.org/http/response"> <message nature="submit_success">Here are your keys!</message> <auth_keys domain="{domain}" issued="{now}"> <key_record valid="{now_minus_30}" expires="{now_plus_30}"> <uuid>{record.uuid}</uuid> <key type="{record.type}">{record.key}</key> </key_record> </auth_keys> </OpenRosaResponse> """.format( now=self.format_datetime_no_usec(now), now_plus_30=self.format_datetime_no_usec(now_plus_30), now_minus_30=self.format_datetime_no_usec(now_minus_30), record=record, domain=domain, ))
the-stack_106_30861
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Smart Home Bot for Telegram. Copyright (c) 2017-2018 Oliver Lau <[email protected]> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. """ import sys import os import datetime import json import time import random import telepot import subprocess import shelve import urllib3 import threading import queue import shutil import pygame import pygame.mixer from tempfile import mkstemp from telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton from telepot.delegate import per_chat_id_in, create_open, pave_event_space, include_callback_query_chat_id from pprint import pprint from watchdog.observers import Observer from watchdog.events import FileSystemEventHandler from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.job import Job from PIL import Image APPNAME = 'smarthomebot' APPVERSION = '1.1.0' TELEGRAM_AUDIO_BITRATE = 48000 TELEGRAM_MAX_MESSAGE_SIZE = 2048 TELEGRAM_MAX_PHOTO_DIMENSION = 1280 class easydict(dict): def __missing__(self, key): self[key] = easydict() return self[key] def send_msg_to_all(msg): if isinstance(msg, str): while len(msg) > 0: for user in authorized_users: bot.sendMessage(user, msg[:TELEGRAM_MAX_MESSAGE_SIZE]) msg = msg[TELEGRAM_MAX_MESSAGE_SIZE:] def take_snapshot_thread(): def get_image_from_url(url, username, password): error_msg = None response = None try: http = urllib3.PoolManager() headers = urllib3.util.make_headers(basic_auth='{}:{}' .format(username, password)) if username and password else None response = http.request('GET', url, headers=headers) except urllib3.exceptions.HTTPError as e: error_msg = e return response, error_msg while True: task = snapshot_queue.get() if task is None: break for camera in task['cameras']: if camera.get('snapshot_url'): bot.sendChatAction(task['chat_id'], action='upload_photo') response, error_msg = \ get_image_from_url(camera.get('snapshot_url'), camera.get('username'), camera.get('password')) if error_msg: bot.sendMessage(task['chat_id'], 'Fehler beim Abrufen des Schnappschusses via {}: {}' .format(camera.get('snapshot_url'), error_msg)) elif response and response.data: _, photo_filename = mkstemp(prefix='snapshot-', suffix='.jpg') f = open(photo_filename, 'wb+') f.write(response.data) f.close() bot.sendPhoto(task['chat_id'], open(photo_filename, 'rb'), caption=datetime.datetime.now().strftime('%d.%m.%Y %H:%M:%S')) os.remove(photo_filename) snapshot_queue.task_done() if 'callback' in task and callable(task['callback']): task['callback']() def make_snapshot(chat_id): snapshot_queue.put({'cameras': cameras.values(), 'chat_id': chat_id}) def process_text_thread(): while True: task = text_queue.get() if task is None: break for encoding in encodings: try: with open(task['src_filename'], 'r', encoding=encoding) as f: msg = f.read(max_text_file_size) except UnicodeDecodeError: if verbose: print('Decoding file as {:s} failed, trying another encoding ...'.format(encoding)) else: break 
send_msg_to_all(msg) os.remove(task['src_filename']) def process_document_thread(): while True: task = document_queue.get() if task is None: break for user in authorized_users: bot.sendDocument(user, open(task['src_filename'], 'rb'), caption=datetime.datetime.now().strftime('%d.%m.%Y %H:%M:%S')) os.remove(task['src_filename']) def process_video_thread(): while True: task = video_queue.get() if task is None: break for user in authorized_users: bot.sendChatAction(user, action='upload_video') _, dst_video_filename = mkstemp(prefix='smarthomebot-', suffix='.mp4') cmd = [path_to_ffmpeg, '-y', '-loglevel', 'panic', '-i', task['src_filename'], '-vf', 'scale=640:-1', '-movflags', '+faststart', '-c:v', 'libx264', '-preset', 'fast', dst_video_filename] if verbose: print('Started {}'.format(' '.join(cmd))) subprocess.call(cmd, shell=False) for user in authorized_users: bot.sendVideo(user, open(dst_video_filename, 'rb'), caption='{} ({})'.format(os.path.basename(task['src_filename']), datetime.datetime.now().strftime('%d.%m.%Y %H:%M:%S'))) print('Removing converted video file: {}'.format(dst_video_filename)) os.remove(dst_video_filename) print('Removing original video file: {}'.format(task['src_filename'])) os.remove(task['src_filename']) video_queue.task_done() def process_voice_thread(): while True: task = voice_queue.get() if task is None: break _, voice_filename = mkstemp(prefix='voice-', suffix='.oga') _, converted_audio_filename = mkstemp(prefix='converted-audio-', suffix='.wav') bot.sendChatAction(task['chat_id'], action='upload_audio') bot.download_file(task['file_id'], voice_filename) cmd = [path_to_ffmpeg, '-y', '-loglevel', 'panic', '-i', voice_filename, '-codec:a', 'pcm_s16le', converted_audio_filename] if verbose: print('Started {}'.format(' '.join(cmd))) subprocess.call(cmd, shell=False) voice = pygame.mixer.Sound(converted_audio_filename) voice.set_volume(audio_volume) voice.play() os.remove(converted_audio_filename) os.remove(voice_filename) bot.sendMessage(task['chat_id'], 'Sprachnachricht wurde abgespielt.') voice_queue.task_done() def process_photo_thread(): while True: task = photo_queue.get() if task is None: break dst_photo_filename = task['src_filename'] if type(max_photo_size) is int: im = Image.open(task['src_filename']) if im.width > max_photo_size or im.height > max_photo_size: im.thumbnail((max_photo_size, max_photo_size), Image.BILINEAR) _, dst_photo_filename = mkstemp(prefix='smarthomebot-', suffix='.jpg') if verbose: print('Resizing photo to {} ...'.format(dst_photo_filename)) im.save(dst_photo_filename, format='JPEG', quality=87) os.remove(task['src_filename']) im.close() if verbose: print('Sending photo {} ...'.format(dst_photo_filename)) for user in authorized_users: bot.sendPhoto(user, open(dst_photo_filename, 'rb'), caption=datetime.datetime.now().strftime('%d.%m.%Y %H:%M:%S')) os.remove(dst_photo_filename) def garbage_collector(): print('Garbage collection ...') def delete_too_old(file_or_dir): print('delete_too_old("{}")'.format(file_or_dir)) for root, subdirs, files in os.walk(upload_folder, topdown=False, onerror=None, followlinks=False): for filename in files: fname = os.path.join(root, filename) ctime = datetime.datetime.fromtimestamp(os.path.getctime(fname)) if ctime + datetime.timedelta(days=15) < datetime.datetime.now(): delete_too_old(fname) for _ in subdirs: pass def file_write_ok(filename, timeout_secs=5): CheckIntervalMS = 100 n_cycles = 1000 * timeout_secs // CheckIntervalMS while os.stat(filename).st_size == 0: # make sure file is written 
time.sleep(CheckIntervalMS / 1000) n_cycles -= 1 if n_cycles < 0: os.remove(filename) return False return True class UploadDirectoryEventHandler(FileSystemEventHandler): def __init__(self, *args, **kwargs): super(UploadDirectoryEventHandler, self).__init__() def on_created(self, event): if not event.is_directory: _, ext = os.path.splitext(os.path.basename(event.src_path)) ext = ext.lower() if isinstance(copy_to, str): print('Backing up {:s} to {:s} ...'.format(event.src_path, copy_to)) shutil.copy2(event.src_path, copy_to) if ext in ['.jpg', '.png']: self.process_photo(event.src_path) elif ext in ['.txt']: self.process_text(event.src_path) elif ext in ['.avi', '.mp4', '.mkv', '.m4v', '.mov', '.mpg']: self.process_video(event.src_path) else: self.process_document(event.src_path) def process_text(self, src_text_filename): if file_write_ok(src_text_filename): if verbose: print('New text file detected: {}'.format(src_text_filename)) if alerting_on and do_send_text: text_queue.put({'src_filename': src_text_filename}) else: os.remove(src_text_filename) def process_document(self, src_document_filename): if file_write_ok(src_document_filename): if verbose: print('New document detected: {}'.format(src_document_filename)) if alerting_on and do_send_documents: document_queue.put({'src_filename': src_document_filename}) else: os.remove(src_document_filename) def process_photo(self, src_photo_filename): if file_write_ok(src_photo_filename): if verbose: print('New photo file detected: {}'.format(src_photo_filename)) if alerting_on and do_send_photos: photo_queue.put({'src_filename': src_photo_filename}) else: os.remove(src_photo_filename) def process_video(self, src_video_filename): if file_write_ok(src_video_filename): if verbose: print('New video file detected: {}'.format(src_video_filename)) if alerting_on and do_send_videos and type(path_to_ffmpeg) is str: video_queue.put({'src_filename': src_video_filename}) else: print('Removing {}'.format(src_video_filename)) os.remove(src_video_filename) class ChatUser(telepot.helper.ChatHandler): IdleMessages = ['tรผdelรผ โ€ฆ', '*gรคhn*', 'Mir ist langweilig.', 'Chill dein Life! Alles cool hier.', 'Hier ist tote Hose.', 'Nix los hier โ€ฆ', 'Scheint niemand zu Hause zu sein.', 'Sanft ruht der See.', 'Hallo-o!!!', 'Alles cool, Digga.', 'Ich kuck und kuck, aber nix passiert.', 'Das Adlerauge ist wachsam, sieht aber nรผscht.', 'Nix tut sich.', 'Mach du dein Ding. 
Ich mach hier meins.', 'Alles voll secure in da house.'] def __init__(self, *args, **kwargs): super(ChatUser, self).__init__(*args, **kwargs) self.snapshot_job = None def open(self, initial_msg, seed): _, _, chat_id = telepot.glance(initial_msg) self.init_scheduler(chat_id) def init_scheduler(self, chat_id): global settings, scheduler interval = settings[chat_id]['snapshot']['interval'] if type(interval) is not int: interval = 0 settings[chat_id]['snapshot']['interval'] = interval if interval > 0: if type(self.snapshot_job) is Job: self.snapshot_job.remove() self.snapshot_job = scheduler.add_job( make_snapshot, 'interval', seconds=interval, kwargs={'chat_id': chat_id}) else: if type(self.snapshot_job) is Job: self.snapshot_job.remove() def on__idle(self, event): if alerting_on: ridx = random.randint(0, len(ChatUser.IdleMessages) - 1) self.sender.sendMessage(ChatUser.IdleMessages[ridx], parse_mode='Markdown') def send_snapshot_menu(self): kbd = [ InlineKeyboardButton(text=cameras[c]['name'], callback_data=c) for c in cameras.keys() ] keyboard = InlineKeyboardMarkup(inline_keyboard=[kbd]) self.sender.sendMessage('Schnappschuss anzeigen von:', reply_markup=keyboard) def send_main_menu(self): global alerting_on kbd = [ InlineKeyboardButton(text=chr(0x1F4F7) + 'Schnappschuss', callback_data='snapshot'), InlineKeyboardButton(text=[chr(0x25B6) + chr(0xFE0F) + 'Alarme ein', chr(0x23F9) + 'Alarme aus'][alerting_on], callback_data=['enable', 'disable'][alerting_on]) ] keyboard = InlineKeyboardMarkup(inline_keyboard=[kbd]) self.sender.sendMessage('Wรคhle eine Aktion:', reply_markup=keyboard) def on_callback_query(self, msg): global alerting_on, snapshooter, snapshot_queue query_id, from_id, query_data = telepot.glance(msg, flavor='callback_query') if verbose: print('Callback Query:', query_id, from_id, query_data) if cameras.get(query_data): bot.answerCallbackQuery(query_id, text='Schnappschuss von deiner Kamera "{}"'.format(query_data)) snapshot_queue.put({'cameras': [cameras[query_data]], 'chat_id': from_id, 'callback': lambda: self.send_snapshot_menu()}) elif query_data == 'disable': alerting_on = False self.bot.answerCallbackQuery(query_id, text='Alarme wurden ausgeschaltet.') self.send_main_menu() elif query_data == 'enable': alerting_on = True self.bot.answerCallbackQuery(query_id, text='Alarme wurden eingeschaltet.') self.send_main_menu() elif query_data == 'snapshot': self.bot.answerCallbackQuery(query_id) self.send_snapshot_menu() def on_chat_message(self, msg): global scheduler, settings, alerting_on content_type, chat_type, chat_id = telepot.glance(msg) if content_type == 'text': if verbose: pprint(msg) msg_text = msg['text'] if msg_text.startswith('/start'): self.sender.sendMessage('*Hallo, ich bin dein Heimรผberwachungs-Bot* v' + APPVERSION + chr(0x1F916) + "\n\n" 'Ich benachrichtige dich, wenn deine Webcams Bewegungen ' 'und laute Gerรคusche erkannt haben ' 'und sende dir ein Video von dem Vorfall.' 
+ "\n", parse_mode='Markdown') self.send_main_menu() elif msg_text.startswith('/uptime'): dt = datetime.datetime.now() - start_timestamp hours = dt.seconds // (60 * 60) minutes = (dt.seconds - hours * 60 * 60) // 60 seconds = dt.seconds - hours * 60 * 60 - minutes * 60 self.sender.sendMessage('Online seit {:s}: {:d} Tage, {:d} Stunden, {:d} Minuten, {:d} Sekunden' .format(start_timestamp.strftime('%d.%m.%Y %H:%M:%S'), dt.days, hours, minutes, seconds)) elif msg_text.startswith('/snapshot'): c = msg_text.split()[1:] subcmd = c[0].lower() if len(c) > 0 else None if subcmd is None: self.send_snapshot_menu() else: if subcmd == 'interval': if len(c) > 1: interval = int(c[1]) settings[chat_id]['snapshot']['interval'] = interval if interval > 0: if type(self.snapshot_job) is Job: self.snapshot_job.remove() self.snapshot_job = scheduler.add_job(make_snapshot, 'interval', seconds=interval, kwargs={'chat_id': chat_id}) self.sender.sendMessage('Schnappschรผsse sind aktiviert. ' 'Das Intervall ist auf {} Sekunden eingestellt.' .format(interval)) else: if type(self.snapshot_job) is Job: self.snapshot_job.remove() self.sender.sendMessage('Zeitgesteuerte Schnappschรผsse sind nun deaktiviert.') else: self.sender.sendMessage('Es waren keine zeitgesteuerten Schnappschรผsse aktiviert.') else: if type(settings[chat_id]['snapshot']['interval']) is not int: self.sender.sendMessage('Schnappschussintervall wurde noch nicht eingestellt.') elif settings[chat_id]['snapshot']['interval'] < 1: self.sender.sendMessage('Aufnehmen von Schnappschรผssen in Intervallen ' 'ist derzeit deaktiviert.') else: self.sender.sendMessage('Schnappschussintervall ist derzeit auf ' '{} Sekunden eingestellt.' .format(settings[chat_id]['snapshot']['interval'])) elif msg_text.startswith('/enable') or \ any(cmd in msg_text.lower() for cmd in ['on', 'go', '1', 'ein']): alerting_on = True send_msg_to_all('รœberwachung wurde eingeschaltet.') elif msg_text.startswith('/disable') or \ any(cmd in msg_text.lower() for cmd in ['off', 'stop', '0', 'aus']): alerting_on = False send_msg_to_all('รœberwachung wurde ausgeschaltet.') elif msg_text.startswith('/toggle'): alerting_on = not alerting_on send_msg_to_all('รœberwachung ist nun {}geschaltet.'.format(['aus', 'ein'][alerting_on])) elif msg_text.startswith('/help'): self.sender.sendMessage("Verfรผgbare Kommandos:\n\n" "/help diese Nachricht anzeigen\n" "/enable /disable /toggle Benachrichtigungen aktivieren/deaktivieren\n" "/snapshot Liste der Kameras anzeigen, die Schnappschรผsse liefern kรถnnen\n" "/snapshot `interval` Das Zeitintervall (Sek.) anzeigen, in dem " "Schnappschรผsse von den Kameras abgerufen und angezeigt werden sollen\n" "/snapshot `interval` `secs` Schnappschussintervall auf `secs` Sekunden " "setzen (`0` fรผr aus)\n" "/uptime Uptime anzeigen\n" "/start den Bot (neu)starten\n", parse_mode='Markdown') elif msg_text.startswith('/'): self.sender.sendMessage('Unbekanntes Kommando. /help fรผr weitere Infos eintippen.') else: self.sender.sendMessage('Ich bin nicht sehr gesprรคchig. 
Tippe /help fรผr weitere Infos ein.') elif content_type == 'voice': if audio_on: voice_queue.put({'file_id': msg['voice']['file_id'], 'chat_id': chat_id}) else: self.sender.sendMessage('Keine Sprachausgabe aktiv.') else: self.sender.sendMessage('Dein "{}" ist im Nirwana gelandet ...'.format(content_type)) settings = easydict() scheduler = BackgroundScheduler() snapshot_queue = None text_queue = None document_queue = None video_queue = None voice_queue = None photo_queue = None snapshooter = None text_processor = None document_processor = None video_processor = None voice_processor = None photo_processor = None authorized_users = None upload_folder = None cameras = None verbose = None path_to_ffmpeg = None max_photo_size = None bot = None alerting_on = True copy_to = None audio_on = None audio_volume = 1.0 do_send_videos = None do_send_photos = None do_send_text = None do_send_documents = None max_text_file_size = None encodings = ['utf-8', 'latin1', 'macroman', 'windows-1252', 'windows-1250'] start_timestamp = datetime.datetime.now() def main(): global bot, authorized_users, cameras, verbose, settings, \ scheduler, cronsched, \ encodings, path_to_ffmpeg, max_photo_size, \ snapshot_queue, snapshooter, copy_to, \ do_send_text, text_queue, max_text_file_size, \ do_send_documents, document_queue, \ do_send_videos, video_queue, video_processor, \ audio_on, audio_volume, voice_queue, voice_processor, upload_folder, \ do_send_photos, photo_queue, photo_processor config_filename = 'smarthomebot-config.json' shelf = shelve.open('.smarthomebot.shelf') if APPNAME in shelf.keys(): settings = easydict(shelf[APPNAME]) try: with open(config_filename, 'r') as config_file: config = json.load(config_file) except FileNotFoundError as e: print('Error: config file not found: {}'.format(e)) return except ValueError as e: print('Error: invalid config file "{}": {}'.format(config_filename, e)) return telegram_bot_token = config.get('telegram_bot_token') if not telegram_bot_token: print('Error: config file doesn\'t contain a `telegram_bot_token`') return authorized_users = config.get('authorized_users') if type(authorized_users) is not list or len(authorized_users) == 0: print('Error: config file doesn\'t contain an `authorized_users` list') return cameras = config.get('cameras') if type(cameras) is not dict: print('Error: config file doesn\'t define any `cameras`') return timeout_secs = config.get('timeout_secs', 10*60) upload_folder = config.get('image_folder', '/home/ftp-upload') event_handler = UploadDirectoryEventHandler(ignore_directories=True) observer = Observer() observer.schedule(event_handler, upload_folder, recursive=True) try: observer.start() except OSError as e: import pwd print('ERROR: Cannot start observer. Make sure the folder {:s} exists and is writable for {:s}.' 
.format(upload_folder, pwd.getpwuid(os.getuid()).pw_name)) return path_to_ffmpeg = config.get('path_to_ffmpeg') max_photo_size = config.get('max_photo_size', TELEGRAM_MAX_PHOTO_DIMENSION) verbose = config.get('verbose', False) do_send_photos = config.get('send_photos', False) do_send_videos = config.get('send_videos', True) do_send_text = config.get('send_text', False) copy_to = config.get('copy_to', None) if isinstance(copy_to, str): if not os.path.isdir(copy_to): print('Error: {:s} (`copy_to`) doesn\'t point to a directory.'.format(copy_to)) return if not os.access(copy_to, os.W_OK): print('Error: {:s} (`copy_to`) is not writable.'.format(copy_to)) return if verbose: print('All received surveillance files will be backed up to {:s}'.format(copy_to)) max_text_file_size = config.get('max_text_file_size', 10 * TELEGRAM_MAX_MESSAGE_SIZE) do_send_documents = config.get('send_documents', False) audio_on = config.get('audio', {}).get('enabled', False) audio_volume = config.get('audio', {}).get('volume', 1.0) bot = telepot.DelegatorBot(telegram_bot_token, [ include_callback_query_chat_id(pave_event_space())(per_chat_id_in(authorized_users, types='private'), create_open, ChatUser, timeout=timeout_secs) ]) snapshot_queue = queue.Queue() snapshooter = threading.Thread(target=take_snapshot_thread) snapshooter.start() if do_send_text: text_queue = queue.Queue() text_processor = threading.Thread(target=process_text_thread) text_processor.start() if verbose: print('Enabled text processing.') if do_send_documents: document_queue = queue.Queue() document_processor = threading.Thread(target=process_document_thread) document_processor.start() if verbose: print('Enabled document processing.') if do_send_photos: photo_queue = queue.Queue() photo_processor = threading.Thread(target=process_photo_thread) photo_processor.start() if verbose: print('Enabled photo processing.') if do_send_videos: video_queue = queue.Queue() video_processor = threading.Thread(target=process_video_thread) video_processor.start() if verbose: print('Enabled video processing.') if audio_on: try: pygame.mixer.pre_init(frequency=TELEGRAM_AUDIO_BITRATE, size=-16, channels=2, buffer=4096) pygame.mixer.init() except: print("\nWARNING: Cannot initialize audio.\n" "*** See above warnings for details.\n" "*** Consider deactivating audio in your \n" "*** SurveillanceBot config file.\n") audio_on = False else: voice_queue = queue.Queue() voice_processor = threading.Thread(target=process_voice_thread) voice_processor.start() if verbose: print('Enabled audio processing.') if verbose: print('Monitoring {} ...'.format(upload_folder)) scheduler.start() scheduler.add_job(garbage_collector, 'cron', hour=0) try: bot.message_loop(run_forever='Bot listening ... (Press Ctrl+C to exit.)') except KeyboardInterrupt: pass if verbose: print('Exiting ...') observer.stop() observer.join() shelf[APPNAME] = settings shelf.sync() shelf.close() scheduler.shutdown() snapshot_queue.put(None) snapshooter.join() if do_send_videos: video_queue.put(None) video_processor.join() if do_send_photos: photo_queue.put(None) photo_processor.join() if do_send_text: text_queue.put(None) text_processor.join() if do_send_documents: document_queue.put(None) document_processor.join() if audio_on: voice_queue.put(None) voice_processor.join() if __name__ == '__main__': main()
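

# Illustrative configuration sketch (every value below is an invented
# placeholder).  main() above loads smarthomebot-config.json; only
# telegram_bot_token, authorized_users and cameras are strictly required, the
# remaining keys fall back to the defaults read via config.get() in main().
#
#     {
#         "telegram_bot_token": "123456789:replace-with-your-bot-token",
#         "authorized_users": [12345678],
#         "cameras": {
#             "frontdoor": {
#                 "name": "Front door",
#                 "snapshot_url": "http://192.168.0.10/snapshot.jpg",
#                 "username": "camera-user",
#                 "password": "camera-password"
#             }
#         },
#         "timeout_secs": 600,
#         "image_folder": "/home/ftp-upload",
#         "path_to_ffmpeg": "/usr/bin/ffmpeg",
#         "max_photo_size": 1280,
#         "verbose": true,
#         "send_photos": false,
#         "send_videos": true,
#         "send_text": false,
#         "send_documents": false,
#         "max_text_file_size": 20480,
#         "copy_to": null,
#         "audio": {"enabled": false, "volume": 1.0}
#     }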
the-stack_106_30865
import os import shutil import cv2 import numpy as np from myGpuFeatures import myGpuFeatures class Method: # ๅ…ณไบŽ GPU ๅŠ ้€Ÿ็š„่ฎพ็ฝฎ is_gpu_available = False # ๅ…ณไบŽๆ‰“ๅฐไฟกๆฏ็š„่ฎพ็ฝฎ input_dir = "" is_out_log_file = False log_file = "evaluate.txt" is_print_screen = True # ๅ…ณไบŽๅ›พๅƒๅขžๅผบ็š„ๆ“ไฝœ is_enhance = False is_clahe = False clip_limit = 20 tile_size = 5 # ๅ…ณไบŽ็‰นๅพๆœ็ดข็š„่ฎพ็ฝฎ roi_ratio = 0.2 feature_method = "surf" # "sift","surf" or "orb" search_ratio = 0.75 # 0.75 is common value for matches # ๅ…ณไบŽ็‰นๅพ้…ๅ‡†็š„่ฎพ็ฝฎ offset_calculate = "mode" # "mode" or "ransac" offset_evaluate = 5 # ๅ…ณไบŽ GPU-SURF ็š„่ฎพ็ฝฎ surf_hessian_threshold = 100.0 surf_n_octaves = 4 surf_n_octave_layers = 3 surf_is_extended = True surf_key_points_ratio = 0.01 surf_is_upright = False # ๅ…ณไบŽ GPU-ORB ็š„่ฎพ็ฝฎ orb_n_features = 5000 orb_scale_factor = 1.2 orb_n_levels = 8 orb_edge_threshold = 31 orb_first_level = 0 orb_wta_k = 2 orb_patch_size = 31 orb_fast_threshold = 20 orb_blur_for_descriptor = False orb_max_distance = 30 def print_and_log(self, content): """ ๅ‘ๅฑๅน•ๆˆ–่€…txtๆ‰“ๅฐไฟกๆฏ :param content: :return: """ if self.is_print_screen: print(content) if self.is_out_log_file: f = open(os.path.join(self.input_dir, self.log_file), "a") f.write(content) f.write("\n") f.close() @staticmethod def make_out_dir(dir_path): """ ๅˆ›้€ ไธ€ไธชๆ–‡ไปถๅคน :param dir_path:ๆ–‡ไปถๅคน็›ฎๅฝ• :return: """ try: os.makedirs(dir_path) except OSError: pass @staticmethod def delete_folder(dir_address): """ ๅˆ ้™คไธ€ไธชๆ–‡ไปถๅคนไธ‹ๆ‰€ๆœ‰ๆ–‡ไปถไปฅๅŠ่ฏฅๆ–‡ไปถๅคน :param dir_address: ๆ–‡ไปถๅคน็›ฎๅฝ• :return: """ shutil.rmtree(dir_address) @staticmethod def resize_image(origin_image, resize_times, inter_method=cv2.INTER_AREA): """ ็ผฉๆ”พๅ›พๅƒ :param origin_image:ๅŽŸๅง‹ๅ›พๅƒ :param resize_times: ็ผฉๆ”พๆฏ”็އ :param inter_method: ๆ’ๅ€ผๆ–นๆณ• :return: ็ผฉๆ”พ็ป“ๆžœ """ (h, w) = origin_image.shape resize_h = int(h * resize_times) resize_w = int(w * resize_times) # cv2.INTER_AREAๆ˜ฏๆต‹่ฏ•ๅŽๆœ€ๅฅฝ็š„ๆ–นๆณ• resized_image = cv2.resize( origin_image, (resize_w, resize_h), interpolation=inter_method) return resized_image def generate_video_from_image(self, source_image, output_dir): """ Convert source_image to video, simply crop sub-image in source_image in row direction with one pixel increment :param source_image: source_image :param output_dir: video output dir :return: """ height, width = source_image.shape[:2] print(height, width) fps = 16 self.make_out_dir(output_dir) # video_writer = cv2.VideoWriter(os.path.join(output_dir, "test_video.avi"), # cv2.VideoWriter_fourcc(*'XVID'), fps, (width, width)) # video_writer = cv2.VideoWriter(os.path.join(output_dir, "test_video.avi"), # cv2.VideoWriter_fourcc('I', '4', '2', '0'), fps, (width, width)) video_writer = cv2.VideoWriter( os.path.join( output_dir, "test_video.avi"), cv2.VideoWriter_fourcc( 'M', 'J', 'P', 'G'), fps, (width, width)) self.print_and_log( "Video setting: fps is {} and the frame size is {}".format( fps, (width, width))) self.print_and_log("Start converting") row_index = 0 while True: if row_index + width > height: break image_temp = source_image[row_index: row_index + width, :, :] video_writer.write(image_temp) self.print_and_log( "The {}th frame with shape of {}".format( row_index + 1, image_temp.shape)) row_index = row_index + 1 video_writer.release() self.print_and_log("Convert end") @staticmethod def np_to_list_for_keypoints(array): """ GPU่ฟ”ๅ›žnumpyๅฝขๅผ็š„็‰นๅพ็‚น๏ผŒ่ฝฌๆˆlistๅฝขๅผ :param array: :return: """ kps = [] row, col = 
array.shape for i in range(row): kps.append([array[i, 0], array[i, 1]]) return kps @staticmethod def np_to_list_for_matches(array): """ GPU่ฟ”ๅ›žnumpyๅฝขๅผ็š„ๅŒน้…ๅฏน๏ผŒ่ฝฌๆˆlistๅฝขๅผ :param array: :return: """ descriptors = [] row, col = array.shape for i in range(row): descriptors.append((array[i, 0], array[i, 1])) return descriptors @staticmethod def np_to_kps_and_descriptors(array): """ GPU่ฟ”ๅ›žnumpyๅฝขๅผ็š„kps๏ผŒdescripotrs๏ผŒ่ฝฌๆˆlistๅฝขๅผ :param array: :return: """ kps = [] descriptors = array[:, :, 1] for i in range(array.shape[0]): kps.append([array[i, 0, 0], array[i, 1, 0]]) return kps, descriptors def detect_and_describe(self, image): """ ็ป™ๅฎšไธ€ๅผ ๅ›พๅƒ๏ผŒๆฑ‚ๅ–็‰นๅพ็‚นๅ’Œ็‰นๅพๆ่ฟฐ็ฌฆ :param image: ่พ“ๅ…ฅๅ›พๅƒ :return: kps๏ผŒfeatures ่ฟ”ๅ›ž็‰นๅพ็‚น้›†๏ผŒๅŠๅฏนๅบ”็š„ๆ่ฟฐ็‰นๅพ """ descriptor = None kps = None features = None if self.is_gpu_available is False: # CPU mode if self.feature_method == "sift": descriptor = cv2.xfeatures2d.SIFT_create() elif self.feature_method == "surf": descriptor = cv2.xfeatures2d.SURF_create() elif self.feature_method == "orb": descriptor = cv2.ORB_create( self.orb_n_features, self.orb_scale_factor, self.orb_n_levels, self.orb_edge_threshold, self.orb_first_level, self.orb_wta_k, 0, self.orb_patch_size, self.orb_fast_threshold) # ๆฃ€ๆต‹SIFT็‰นๅพ็‚น๏ผŒๅนถ่ฎก็ฎ—ๆ่ฟฐๅญ kps, features = descriptor.detectAndCompute(image, None) # ๅฐ†็ป“ๆžœ่ฝฌๆขๆˆNumPyๆ•ฐ็ป„ kps = np.float32([kp.pt for kp in kps]) else: # GPU mode if self.feature_method == "sift": # ็›ฎๅ‰GPU-SIFTๅฐšๆœชๅผ€ๅ‘๏ผŒๅ…ˆ้‡‡็”จCPU็‰ˆๆœฌ็š„ๆ›ฟไปฃ descriptor = cv2.xfeatures2d.SIFT_create() kps, features = descriptor.detectAndCompute(image, None) kps = np.float32([kp.pt for kp in kps]) elif self.feature_method == "surf": kps, features = self.np_to_kps_and_descriptors( myGpuFeatures.detectAndDescribeBySurf(image, self.surf_hessian_threshold, self.surf_n_octaves, self.surf_n_octave_layers, self.surf_is_extended, self.surf_key_points_ratio, self.surf_is_upright)) elif self.feature_method == "orb": kps, features = self.np_to_kps_and_descriptors( myGpuFeatures.detectAndDescribeByOrb(image, self.orb_n_features, self.orb_scale_factor, self.orb_n_levels, self.orb_edge_threshold, self.orb_first_level, self.orb_wta_k, 0, self.orb_patch_size, self.orb_fast_threshold, self.orb_blur_for_descriptor)) # ่ฟ”ๅ›ž็‰นๅพ็‚น้›†๏ผŒๅŠๅฏนๅบ”็š„ๆ่ฟฐ็‰นๅพ return kps, features def match_descriptors(self, last_features, next_features): """ ๆ นๆฎไธคๅผ ๅ›พๅƒ็š„็‰นๅพๆ่ฟฐ็ฌฆ๏ผŒๆ‰พๅˆฐ็›ธๅบ”ๅŒน้…ๅฏน :param last_features: ไธŠไธ€ๅผ ๅ›พๅƒ็‰นๅพๆ่ฟฐ็ฌฆ :param next_features: ไธ‹ไธ€ๅผ ๅ›พๅƒ็‰นๅพๆ่ฟฐ็ฌฆ :return: matches๏ผŒ ๅŒน้…็Ÿฉ้˜ต """ # matches = None # if self.feature_method == "surf" or self.feature_method == "sift": # matcher = cv2.DescriptorMatcher_create("BruteForce") # # ไฝฟ็”จKNNๆฃ€ๆต‹ๆฅ่‡ชAใ€Bๅ›พ็š„SIFT็‰นๅพๅŒน้…ๅฏน๏ผŒK=2๏ผŒ่ฟ”ๅ›žไธ€ไธชๅˆ—่กจ # raw_matches = matcher.knnMatch(last_features, next_features, 2) # matches = [] # for m in raw_matches: # # ๅฝ“ๆœ€่ฟ‘่ท็ฆป่ทŸๆฌก่ฟ‘่ท็ฆป็š„ๆฏ”ๅ€ผๅฐไบŽratioๅ€ผๆ—ถ๏ผŒไฟ็•™ๆญคๅŒน้…ๅฏน # if len(m) == 2 and m[0].distance < m[1].distance * self.search_ratio: # # ๅญ˜ๅ‚จไธคไธช็‚นๅœจfeaturesA, featuresBไธญ็š„็ดขๅผ•ๅ€ผ # matches.append((m[0].trainIdx, m[0].queryIdx)) # elif self.feature_method == "orb": # matcher = cv2.DescriptorMatcher_create("BruteForce-Hamming") # raw_matches = matcher.match(last_features, next_features) # matches = [] # for m in raw_matches: # matches.append((m.trainIdx, m.queryIdx)) matches = None if self.is_gpu_available is 
False: # CPU Mode # ๅปบ็ซ‹ๆšดๅŠ›ๅŒน้…ๅ™จ if self.feature_method == "surf" or self.feature_method == "sift": matcher = cv2.DescriptorMatcher_create("BruteForce") # ไฝฟ็”จKNNๆฃ€ๆต‹ๆฅ่‡ชAใ€Bๅ›พ็š„SIFT็‰นๅพๅŒน้…ๅฏน๏ผŒK=2๏ผŒ่ฟ”ๅ›žไธ€ไธชๅˆ—่กจ raw_matches = matcher.knnMatch(last_features, next_features, 2) matches = [] for m in raw_matches: # ๅฝ“ๆœ€่ฟ‘่ท็ฆป่ทŸๆฌก่ฟ‘่ท็ฆป็š„ๆฏ”ๅ€ผๅฐไบŽratioๅ€ผๆ—ถ๏ผŒไฟ็•™ๆญคๅŒน้…ๅฏน if len( m) == 2 and m[0].distance < m[1].distance * self.search_ratio: # ๅญ˜ๅ‚จไธคไธช็‚นๅœจfeaturesA, featuresBไธญ็š„็ดขๅผ•ๅ€ผ matches.append((m[0].trainIdx, m[0].queryIdx)) elif self.feature_method == "orb": matcher = cv2.DescriptorMatcher_create("BruteForce-Hamming") raw_matches = matcher.match(last_features, next_features) matches = [] for m in raw_matches: matches.append((m.trainIdx, m.queryIdx)) else: # GPU Mode if self.feature_method == "surf": matches = self.np_to_list_for_matches(myGpuFeatures.matchDescriptors( np.array(last_features), np.array(next_features), 2, self.search_ratio)) elif self.feature_method == "orb": matches = self.np_to_list_for_matches( myGpuFeatures.matchDescriptors( np.array(last_features), np.array(next_features), 3, self.orb_max_distance)) return matches def calculate_feature(self, input_image): """ ่ฎก็ฎ—ๅ›พๅƒ็‰นๅพ็‚น :param input_image: ่พ“ๅ…ฅๅ›พๅƒ :return: kps, features๏ผŒ ่ฟ”ๅ›ž็‰นๅพ็‚นๅŠๅ…ถ็›ธๅบ”็‰นๅพๆ่ฟฐ็ฌฆ """ # ๅˆคๆ–ญๆ˜ฏๅฆๆœ‰ๅขžๅผบ if self.is_enhance: if self.is_clahe: clahe = cv2.createCLAHE( clipLimit=self.clip_limit, tileGridSize=( self.tile_size, self.tile_size)) input_image = clahe.apply(input_image) elif self.is_clahe is False: input_image = cv2.equalizeHist(input_image) kps, features = self.detect_and_describe(input_image) return kps, features def get_offset_by_mode(self, last_kps, next_kps, matches, use_round=True): """ ้€š่ฟ‡ไผ—ๆ•ฐ็š„ๆ–นๆณ•ๆฑ‚ๅ–ไฝ็งป :param last_kps: ไธŠไธ€ๅผ ๅ›พๅƒ็š„็‰นๅพ็‚น :param next_kps: ไธ‹ไธ€ๅผ ๅ›พๅƒ็š„็‰นๅพ็‚น :param matches: ๅŒน้…็Ÿฉ้˜ต :param use_round: ่ฎก็ฎ—ๅๆ ‡ๅ็งป้‡ๆ—ถๆ˜ฏๅฆ่ฆๅ››่ˆไบ”ๅ…ฅ :return: ่ฟ”ๅ›žๆ‹ผๆŽฅ็ป“ๆžœๅ›พๅƒ """ total_status = True if len(matches) == 0: total_status = False return total_status, "the two images have no matches" dx_list = [] dy_list = [] for trainIdx, queryIdx in matches: last_pt = (last_kps[queryIdx][1], last_kps[queryIdx][0]) next_pt = (next_kps[trainIdx][1], next_kps[trainIdx][0]) if int( last_pt[0] - next_pt[0]) == 0 and int( last_pt[1] - next_pt[1]) == 0: continue if use_round: dx_list.append(int(round(last_pt[0] - next_pt[0]))) dy_list.append(int(round(last_pt[1] - next_pt[1]))) else: dx_list.append(int(last_pt[0] - next_pt[0])) dy_list.append(int(last_pt[1] - next_pt[1])) if len(dx_list) == 0: dx_list.append(0) dy_list.append(0) # Get Mode offset in [dxList, dyList], thanks for clovermini zipped = zip(dx_list, dy_list) zip_list = list(zipped) zip_dict = dict((a, zip_list.count(a)) for a in zip_list) zip_dict_sorted = dict( sorted( zip_dict.items(), key=lambda x: x[1], reverse=True)) dx = list(zip_dict_sorted)[0][0] dy = list(zip_dict_sorted)[0][1] num = zip_dict_sorted[list(zip_dict_sorted)[0]] if num < self.offset_evaluate: total_status = False return total_status, "the two images have less common offset" else: return total_status, [dx, dy] def get_offset_by_ransac(self, last_kps, next_kps, matches): """ ้€š่ฟ‡ransacๆ–นๆณ•ๆฑ‚ๅ–ไฝ็งป :param last_kps: ไธŠไธ€ๅผ ๅ›พๅƒ็š„็‰นๅพ็‚น :param next_kps: ไธ‹ไธ€ๅผ ๅ›พๅƒ็š„็‰นๅพ็‚น :param matches: ๅŒน้…็Ÿฉ้˜ต :return: ่ฟ”ๅ›žๆ‹ผๆŽฅ็ป“ๆžœๅ›พๅƒ """ total_status = False last_pts = 
np.float32([last_kps[i] for (_, i) in matches]) next_pts = np.float32([next_kps[i] for (i, _) in matches]) if len(matches) == 0: return total_status, [0, 0], 0 (H, status) = cv2.findHomography(last_pts, next_pts, cv2.RANSAC, 3, 0.9) true_count = 0 for i in range(0, len(status)): if status[i]: true_count = true_count + 1 if true_count >= self.offset_evaluate: total_status = True adjust_h = H.copy() adjust_h[0, 2] = 0 adjust_h[1, 2] = 0 adjust_h[2, 0] = 0 adjust_h[2, 1] = 0 return total_status, [np.round(np.array(H).astype(np.int)[1, 2]) * (-1), np.round(np.array(H).astype(np.int)[0, 2]) * (-1)], adjust_h else: return total_status, [0, 0], 0
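

# Illustrative usage sketch (hypothetical file names; assumes an OpenCV build
# with the xfeatures2d contrib module, since feature_method defaults to "surf",
# and that the myGpuFeatures extension imported above is importable).  A typical
# registration step with the class above: detect keypoints in two neighbouring
# grayscale images, match their descriptors, then estimate the shift from the
# mode of the point-wise displacements.
def _example_register_pair(last_path='last.png', next_path='next.png'):
    method = Method()
    last_image = cv2.imread(last_path, cv2.IMREAD_GRAYSCALE)
    next_image = cv2.imread(next_path, cv2.IMREAD_GRAYSCALE)
    last_kps, last_features = method.calculate_feature(last_image)
    next_kps, next_features = method.calculate_feature(next_image)
    matches = method.match_descriptors(last_features, next_features)
    status, offset = method.get_offset_by_mode(last_kps, next_kps, matches)
    # offset is the [dx, dy] pixel shift supported by the most matches when
    # status is True; otherwise offset holds an explanatory message.
    return status, offset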
the-stack_106_30866
"""Support for MyQ-Enabled Garage Doors.""" import logging import time import voluptuous as vol from homeassistant.components.cover import ( DEVICE_CLASS_GARAGE, DEVICE_CLASS_GATE, PLATFORM_SCHEMA, SUPPORT_CLOSE, SUPPORT_OPEN, CoverDevice, ) from homeassistant.config_entries import SOURCE_IMPORT from homeassistant.const import ( CONF_PASSWORD, CONF_TYPE, CONF_USERNAME, STATE_CLOSED, STATE_CLOSING, STATE_OPENING, ) from homeassistant.core import callback from homeassistant.helpers import config_validation as cv from homeassistant.helpers.event import async_call_later from .const import ( DOMAIN, KNOWN_MODELS, MANUFACTURER, MYQ_COORDINATOR, MYQ_DEVICE_STATE, MYQ_DEVICE_STATE_ONLINE, MYQ_DEVICE_TYPE, MYQ_DEVICE_TYPE_GATE, MYQ_GATEWAY, MYQ_TO_HASS, TRANSITION_COMPLETE_DURATION, TRANSITION_START_DURATION, ) _LOGGER = logging.getLogger(__name__) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, # This parameter is no longer used; keeping it to avoid a breaking change in # a hotfix, but in a future main release, this should be removed: vol.Optional(CONF_TYPE): cv.string, }, ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the platform.""" hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data={ CONF_USERNAME: config[CONF_USERNAME], CONF_PASSWORD: config[CONF_PASSWORD], }, ) ) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up mysq covers.""" data = hass.data[DOMAIN][config_entry.entry_id] myq = data[MYQ_GATEWAY] coordinator = data[MYQ_COORDINATOR] async_add_entities( [MyQDevice(coordinator, device) for device in myq.covers.values()], True ) class MyQDevice(CoverDevice): """Representation of a MyQ cover.""" def __init__(self, coordinator, device): """Initialize with API object, device id.""" self._coordinator = coordinator self._device = device self._last_action_timestamp = 0 self._scheduled_transition_update = None @property def device_class(self): """Define this cover as a garage door.""" device_type = self._device.device_json.get(MYQ_DEVICE_TYPE) if device_type is not None and device_type == MYQ_DEVICE_TYPE_GATE: return DEVICE_CLASS_GATE return DEVICE_CLASS_GARAGE @property def name(self): """Return the name of the garage door if any.""" return self._device.name @property def available(self): """Return if the device is online.""" if not self._coordinator.last_update_success: return False # Not all devices report online so assume True if its missing return self._device.device_json[MYQ_DEVICE_STATE].get( MYQ_DEVICE_STATE_ONLINE, True ) @property def is_closed(self): """Return true if cover is closed, else False.""" return MYQ_TO_HASS.get(self._device.state) == STATE_CLOSED @property def is_closing(self): """Return if the cover is closing or not.""" return MYQ_TO_HASS.get(self._device.state) == STATE_CLOSING @property def is_opening(self): """Return if the cover is opening or not.""" return MYQ_TO_HASS.get(self._device.state) == STATE_OPENING @property def supported_features(self): """Flag supported features.""" return SUPPORT_OPEN | SUPPORT_CLOSE @property def unique_id(self): """Return a unique, Home Assistant friendly identifier for this entity.""" return self._device.device_id async def async_close_cover(self, **kwargs): """Issue close command to cover.""" self._last_action_timestamp = time.time() await self._device.close() self._async_schedule_update_for_transition() async def 
async_open_cover(self, **kwargs): """Issue open command to cover.""" self._last_action_timestamp = time.time() await self._device.open() self._async_schedule_update_for_transition() @callback def _async_schedule_update_for_transition(self): self.async_write_ha_state() # Cancel any previous updates if self._scheduled_transition_update: self._scheduled_transition_update() # Schedule an update for when we expect the transition # to be completed so the garage door or gate does not # seem like its closing or opening for a long time self._scheduled_transition_update = async_call_later( self.hass, TRANSITION_COMPLETE_DURATION, self._async_complete_schedule_update, ) async def _async_complete_schedule_update(self, _): """Update status of the cover via coordinator.""" self._scheduled_transition_update = None await self._coordinator.async_request_refresh() async def async_update(self): """Update status of cover.""" await self._coordinator.async_request_refresh() @property def device_info(self): """Return the device_info of the device.""" device_info = { "identifiers": {(DOMAIN, self._device.device_id)}, "name": self._device.name, "manufacturer": MANUFACTURER, "sw_version": self._device.firmware_version, } model = KNOWN_MODELS.get(self._device.device_id[2:4]) if model: device_info["model"] = model if self._device.parent_device_id: device_info["via_device"] = (DOMAIN, self._device.parent_device_id) return device_info @callback def _async_consume_update(self): if time.time() - self._last_action_timestamp <= TRANSITION_START_DURATION: # If we just started a transition we need # to prevent a bouncy state return self.async_write_ha_state() @property def should_poll(self): """Return False, updates are controlled via coordinator.""" return False async def async_added_to_hass(self): """Subscribe to updates.""" self.async_on_remove( self._coordinator.async_add_listener(self._async_consume_update) ) async def async_will_remove_from_hass(self): """Undo subscription.""" if self._scheduled_transition_update: self._scheduled_transition_update()
the-stack_106_30868
# plottools.py """ Collection of classes to help create plots. """ import matplotlib.pyplot as plt class Plot(object): """ Base class for a plot. :param title: Plot title. Optional. :type title: str """ def __init__(self, title=None): self.fig = plt.figure() self.axplot = self.fig.add_subplot(1, 1, 1) for item in [self.fig, self.axplot]: item.patch.set_visible(False) if title is not None: self.set_title(title) def set_title(self, title): """ Add a title to the plot. :param title: Title to assign to the plot. :type title: str """ self.axplot.set_title(title) def set_axis_label(self, label, axis): """ Add an axis label to the plot. :param label: Label to assign to axis. :type label: str :param axis: Name of the axis, x or y :type axis: str """ if axis == 'x': self.axplot.set_xlabel(label) elif axis == 'y': self.axplot.set_ylabel(label) else: errmsg = 'Valid axis names are x and y.' raise ValueError(errmsg) class SpPlot(Plot): """ Class to create and customize a spectrum plot. Subclasses Plot. :param title: Title to assign to the plot. :type title: str """ def __init__(self, title=None): Plot.__init__(self, title) def plot_spectrum(self, sp1d, title=None, color='k'): """ Plot the spectrum (counts vs wavelength) with title and axis labels. :param sp1d: A Spectrum instance. :type sp1d: Spectrum object. :param title: Title for the plot. Optional. :type title: str :param color: Colour of the line :type color: str """ if title is not None: self.set_title(title) self.set_axis_label(''.join(['Wavelength [', sp1d.wunit.name, ']']), 'x') self.set_axis_label('Counts', 'y') self.axplot.plot(sp1d.wlen, sp1d.counts, color) self.axplot.axis('tight') self.fig.canvas.draw() return def adjust_ylimits(self, ylim1, ylim2): """ Adjust the lower and upper bounds to the y-axis. :param ylim1: Lower limit for the y-axis. :type ylim1: float :param ylim2: Upper limit for the y-axis. :type ylim2: float """ self.axplot.set_ylim(ylim1, ylim2) self.fig.canvas.draw() return def erase_plot(self, line_position=0): """ Erase the spectrum but not the box and axes. :param line_position: Position on the stack of the spectrum to erase. :type line_position: int """ self.axplot.lines.pop(line_position).remove self.fig.canvas.draw() return def annotate_lines(self, lines): """ Annotate the plot with spectra line identifications. :param lines: The line list to add to the plot. The lines are stored in a list of tuples with (obswlen, name), where obswlen is a float and name is a string. :type lines: list of tuples """ # lines is list of tuple (obswlen, name) (xlow, xhigh) = self.axplot.get_xlim() (_, yhigh) = self.axplot.get_ylim() ypos = yhigh * 0.8 ydelta = yhigh * 0.05 i = 0 for line in lines: if xlow < line[0] < xhigh: self.axplot.text(line[0], ypos-(i*ydelta)-ydelta*1.25, '|', horizontalalignment='center', verticalalignment='center', fontsize=10) self.axplot.text(line[0], ypos-(i*ydelta), line[1], horizontalalignment='center', verticalalignment='center', fontsize=10) i += 1 self.fig.canvas.draw() return # def draw_band_limits(self): # return def write_png(self, output_name): """ Write the figure to a PNG file. :param output_name: Name of the output file. ??Is the extension .png required or is it added automatically?? 
        :type output_name: str

        """
        self.fig.savefig(output_name)

        return


class MultiPlot(object):
    def __init__(self, nrows=1, ncols=1, title=None):
        self.nrows = nrows
        self.ncols = ncols
        # Always set the attribute so plot() does not raise AttributeError
        # when no title was given (fig.suptitle accepts None).
        self.title = title
        self.plots_titles = None
        self.plots_labels = None
        self.plots_data = None
        self.pdf = None
        # Default figure size in inches; override with set_size().
        self.width = 11
        self.height = 8.5

    def add_data(self, plots_data, plots_labels=None):
        """
        Parameters
        ----------
        plots_data : list of tuple
            tuples of x and y data, one tuple per plot
        """
        self.plots_data = plots_data
        if plots_labels is not None:
            self.add_labels(plots_labels)

    def add_labels(self, plots_labels):
        self.plots_labels = plots_labels

    def add_titles(self, plots_titles):
        self.plots_titles = plots_titles

    def set_size(self, width, height):
        self.width = width
        self.height = height

    def plot(self, png=False, filename='multiplot.png', pdf=None):
        # squeeze=False keeps self.axs two-dimensional even for a single
        # row or column, so the nested loop below always works.
        self.fig, self.axs = plt.subplots(self.nrows, self.ncols, squeeze=False)
        self.fig.suptitle(self.title)
        self.fig.set_size_inches(self.width, self.height)
        nplots = len(self.plots_data)
        plotid = 0
        for axrow in self.axs:
            for ax in axrow:
                xlabel = None
                ylabel = None
                title = None
                if plotid == nplots:
                    break   # fewer plots than slots
                x = self.plots_data[plotid][0]
                y = self.plots_data[plotid][1]
                if self.plots_labels is not None:
                    if len(self.plots_labels) == 1:
                        xlabel = self.plots_labels[0][0]
                        ylabel = self.plots_labels[0][1]
                    else:
                        xlabel = self.plots_labels[plotid][0]
                        ylabel = self.plots_labels[plotid][1]
                if self.plots_titles is not None:
                    title = self.plots_titles[plotid]

                ax.plot(x, y)
                ax.set_title(title)
                ax.set(xlabel=xlabel, ylabel=ylabel)
                plotid += 1

        self.fig.tight_layout(rect=[0, 0.03, 1, 0.95])
        if pdf:
            pdf.savefig()
        if png:
            plt.savefig(filename)
        else:
            plt.show()

    def save_plot(self, filename='multiplot.png'):
        # plot() has no 'save' keyword; png=True is the intended flag.
        self.plot(png=True, filename=filename)

    def close(self):
        plt.close()


"""
import plottools
from importlib import reload
import numpy as np

x = np.linspace(0, 2 * np.pi, 400)
y = np.sin(x ** 2)

m = plottools.MultiPlot(2, 2, 'test')
plot_data = [(x, y), (x, -y), (-x, y), (-x, -y)]
plot_labels = [('x axis', 'y axis'), ('x axis', '-y axis'), ('-x axis', 'y axis'), ('-x axis', '-y axis')]
m.add_data(plot_data, plot_labels)
m.plot()
"""
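# Illustrative usage sketch for SpPlot (added for clarity, not part of the
# original module). It assumes only what plot_spectrum() actually touches on
# the spectrum object: .wlen, .counts and .wunit.name. The SimpleNamespace
# stand-in below is hypothetical; it is not a real Spectrum class from this
# package.
"""
import numpy as np
from types import SimpleNamespace
import plottools

wlen = np.linspace(400.0, 700.0, 500)
counts = 1000.0 + 50.0 * np.sin(wlen / 20.0)
sp1d = SimpleNamespace(wlen=wlen, counts=counts, wunit=SimpleNamespace(name='nm'))

sp_plot = plottools.SpPlot('Example spectrum')
sp_plot.plot_spectrum(sp1d, color='b')
sp_plot.adjust_ylimits(900, 1100)
sp_plot.write_png('example_spectrum.png')
"""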
the-stack_106_30869
# -*- coding: utf-8 -*- """Test the PyKEEN pipeline function.""" import unittest import pandas as pd import pykeen.regularizers from pykeen.datasets import Nations from pykeen.models.base import Model from pykeen.pipeline import PipelineResult, pipeline from pykeen.regularizers import NoRegularizer class TestPipeline(unittest.TestCase): """Test the pipeline.""" @classmethod def setUpClass(cls): """Set up a shared result.""" cls.result = pipeline( model='TransE', dataset='nations', training_kwargs=dict(num_epochs=5), ) cls.model = cls.result.model nations = Nations() cls.testing_mapped_triples = nations.testing.mapped_triples.to(cls.model.device) def test_predict_tails_no_novelties(self): """Test scoring tails without labeling as novel w.r.t. training and testing.""" tails_df = self.model.predict_tails('brazil', 'intergovorgs', testing=self.testing_mapped_triples, add_novelties=False) self.assertEqual(['tail_id', 'tail_label', 'score'], list(tails_df.columns)) self.assertEqual(len(self.model.triples_factory.entity_to_id), len(tails_df.index)) def test_predict_tails_remove_known(self): """Test scoring tails while removing non-novel triples w.r.t. training and testing.""" tails_df = self.model.predict_tails('brazil', 'intergovorgs', testing=self.testing_mapped_triples, remove_known=True) self.assertEqual(['tail_id', 'tail_label', 'score'], list(tails_df.columns)) self.assertEqual({'jordan', 'brazil', 'ussr', 'burma', 'china'}, set(tails_df['tail_label'])) def test_predict_tails_with_novelties(self): """Test scoring tails with labeling as novel w.r.t. training and testing.""" tails_df = self.model.predict_tails('brazil', 'intergovorgs', testing=self.testing_mapped_triples) self.assertEqual(['tail_id', 'tail_label', 'score', 'in_training', 'in_testing'], list(tails_df.columns)) self.assertEqual(len(self.model.triples_factory.entity_to_id), len(tails_df.index)) training_tails = set(tails_df.loc[tails_df['in_training'], 'tail_label']) self.assertEqual({'usa', 'uk', 'netherlands', 'egypt', 'india', 'israel', 'indonesia'}, training_tails) testing_tails = set(tails_df.loc[tails_df['in_testing'], 'tail_label']) self.assertEqual({'poland', 'cuba'}, testing_tails) def test_predict_heads_with_novelties(self): """Test scoring heads with labeling as novel w.r.t. training and testing.""" heads_df = self.model.predict_heads('conferences', 'brazil', testing=self.testing_mapped_triples) self.assertEqual(['head_id', 'head_label', 'score', 'in_training', 'in_testing'], list(heads_df.columns)) self.assertEqual(len(self.model.triples_factory.entity_to_id), len(heads_df.index)) training_heads = set(heads_df.loc[heads_df['in_training'], 'head_label']) self.assertEqual({'usa', 'india', 'ussr', 'poland', 'cuba'}, training_heads) testing_heads = set(heads_df.loc[heads_df['in_testing'], 'head_label']) self.assertEqual(set(), testing_heads) def test_predict_all_no_novelties(self): """Test scoring all triples without labeling as novel w.r.t. training and testing.""" all_df = self.model.score_all_triples(testing=self.testing_mapped_triples, add_novelties=False) self.assertIsInstance(all_df, pd.DataFrame) self.assertEqual( ['head_id', 'head_label', 'relation_id', 'relation_label', 'tail_id', 'tail_label', 'score'], list(all_df.columns), ) possible = self.model.triples_factory.num_relations * self.model.num_entities ** 2 self.assertEqual(possible, len(all_df.index)) def test_predict_all_remove_known(self): """Test scoring all triples while removing non-novel triples w.r.t. 
training and testing.""" all_df = self.model.score_all_triples(testing=self.testing_mapped_triples, remove_known=True) self.assertIsInstance(all_df, pd.DataFrame) self.assertEqual( ['head_id', 'head_label', 'relation_id', 'relation_label', 'tail_id', 'tail_label', 'score'], list(all_df.columns), ) possible = self.model.triples_factory.num_relations * self.model.num_entities ** 2 known = self.model.triples_factory.num_triples + self.testing_mapped_triples.shape[0] self.assertNotEqual(possible, known, msg='testing and training triples cover all possible triples') self.assertEqual(possible - known, len(all_df.index)) def test_predict_all_with_novelties(self): """Test scoring all triples with labeling as novel w.r.t. training and testing.""" all_df = self.model.score_all_triples(testing=self.testing_mapped_triples) self.assertIsInstance(all_df, pd.DataFrame) self.assertEqual( [ 'head_id', 'head_label', 'relation_id', 'relation_label', 'tail_id', 'tail_label', 'score', 'in_training', 'in_testing', ], list(all_df.columns), ) possible = self.model.triples_factory.num_relations * self.model.num_entities ** 2 self.assertEqual(possible, len(all_df.index)) self.assertEqual(self.model.triples_factory.num_triples, all_df['in_training'].sum()) self.assertEqual(self.testing_mapped_triples.shape[0], all_df['in_testing'].sum()) class TestAttributes(unittest.TestCase): """Test that the keywords given to the pipeline make it through.""" def test_specify_regularizer(self): """Test a pipeline that uses a regularizer.""" for regularizer, cls in [ (None, pykeen.regularizers.NoRegularizer), ('no', pykeen.regularizers.NoRegularizer), (NoRegularizer, pykeen.regularizers.NoRegularizer), ('powersum', pykeen.regularizers.PowerSumRegularizer), ('lp', pykeen.regularizers.LpRegularizer), ]: with self.subTest(regularizer=regularizer): pipeline_result = pipeline( model='TransE', dataset='Nations', regularizer=regularizer, training_kwargs=dict(num_epochs=1), ) self.assertIsInstance(pipeline_result, PipelineResult) self.assertIsInstance(pipeline_result.model, Model) self.assertIsInstance(pipeline_result.model.regularizer, cls)
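# Minimal usage sketch distilled from the tests above (illustrative only, not
# part of the test suite): train a small TransE model on Nations and rank tail
# entities for ('brazil', 'intergovorgs'), exactly as TestPipeline does in
# setUpClass and test_predict_tails_with_novelties.
#
#     result = pipeline(
#         model='TransE',
#         dataset='nations',
#         training_kwargs=dict(num_epochs=5),
#     )
#     testing = Nations().testing.mapped_triples.to(result.model.device)
#     tails_df = result.model.predict_tails('brazil', 'intergovorgs', testing=testing)
#     print(tails_df.head())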
the-stack_106_30871
# Created by Egor Kostan. # GitHub: https://github.com/ikostan # LinkedIn: https://www.linkedin.com/in/egor-kostan/ def solution(number: int) -> int: """ If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23. Finish the solution so that it returns the sum of all the multiples of 3 or 5 below the number passed in. :param number: :return: """ result = 0 for n in range(1, number): if n % 3 == 0 or n % 5 == 0: result += n return result
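# Small illustrative self-check (added for clarity; not part of the original
# kata solution). The expected values follow directly from the docstring:
# below 10 the multiples of 3 or 5 are 3, 5, 6 and 9, which sum to 23, and
# numbers divisible by both 3 and 5 (such as 15) are counted only once.
if __name__ == "__main__":
    assert solution(10) == 23
    assert solution(16) == 60  # 3 + 5 + 6 + 9 + 10 + 12 + 15
    print("solution() sanity checks passed")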
the-stack_106_30872
# Copyright (c) 2020. Tim O'Donnell # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import logging import re import sys from setuptools import setup # normally we would import six.PY2 but can't yet assume that six # is installed here PY2 = (sys.version_info.major == 2) readme_dir = os.path.dirname(__file__) readme_filename = os.path.join(readme_dir, 'README.md') try: with open(readme_filename, 'r') as f: readme = f.read() except: logging.warning("Failed to load %s" % readme_filename) readme = "" try: import pypandoc readme = pypandoc.convert(readme, to='rst', format='md') except: logging.warning("Conversion of long_description from MD to RST failed") pass with open('mhc2flurry/version.py', 'r') as f: version = re.search( r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1) if __name__ == '__main__': required_packages = [ 'six', 'pandas>=0.20.3', 'appdirs', 'tensorflow>=2.3.0', 'scikit-learn', 'mhcgnomes', 'pyyaml', 'tqdm', 'np_utils', ] setup( name='mhc2flurry', version=version, description="MHC class II Binding Predictor", author="Tim O'Donnell", author_email="[email protected]", url="https://github.com/openvax/mhc2flurry", license="http://www.apache.org/licenses/LICENSE-2.0.html", entry_points={ 'console_scripts': [ 'mhc2flurry-downloads = mhc2flurry.downloads_command:run', #'mhc2flurry-predict = mhc2flurry.predict_command:run', #'mhc2flurry-predict-scan = mhc2flurry.predict_scan_command:run', #'mhc2flurry-train-pan-allele-models = ' # 'mhc2flurry.train_pan_allele_models_command:run', #'mhc2flurry-calibrate-percentile-ranks = ' # 'mhc2flurry.calibrate_percentile_ranks_command:run', #'_mhc2flurry-cluster-worker-entry-point = ' # 'mhc2flurry.cluster_parallelism:worker_entry_point', ] }, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Operating System :: OS Independent', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python', 'Topic :: Scientific/Engineering :: Bio-Informatics', ], package_data={ 'mhc2flurry': ['downloads.yml'], }, install_requires=required_packages, long_description=readme, packages=[ 'mhc2flurry', ], )
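# Note on usage (illustrative, not part of the original setup script): after an
# editable install with `pip install -e .`, the single console entry point
# declared above becomes available on the PATH, e.g.
#
#     $ mhc2flurry-downloads --help
#
# The commented-out entry points (mhc2flurry-predict, etc.) are not installed.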
the-stack_106_30873
from sys import exit from logging import getLogger from influxdb import InfluxDBClient from requests.exceptions import ConnectionError from influxdb.exceptions import InfluxDBServerError class DBManager(object): def __init__(self, server): self.server = server self.logger = getLogger() if self.server.url == "influxdb.domain.tld": self.logger.critical("You have not configured your varken.ini. Please read Wiki page for configuration") exit() self.influx = InfluxDBClient(host=self.server.url, port=self.server.port, username=self.server.username, password=self.server.password, ssl=self.server.ssl, database='varken', verify_ssl=self.server.verify_ssl) try: version = self.influx.request('ping', expected_response_code=204).headers['X-Influxdb-Version'] self.logger.info('Influxdb version: %s', version) except ConnectionError: self.logger.critical("Error testing connection to InfluxDB. Please check your url/hostname") exit() databases = [db['name'] for db in self.influx.get_list_database()] if 'varken' not in databases: self.logger.info("Creating varken database") self.influx.create_database('varken') retention_policies = [policy['name'] for policy in self.influx.get_list_retention_policies(database='varken')] if 'varken 30d-1h' not in retention_policies: self.logger.info("Creating varken retention policy (30d-1h)") self.influx.create_retention_policy(name='varken 30d-1h', duration='30d', replication='1', database='varken', default=True, shard_duration='1h') def write_points(self, data): d = data self.logger.debug('Writing Data to InfluxDB %s', d) try: self.influx.write_points(d) except (InfluxDBServerError, ConnectionError) as e: self.logger.error('Error writing data to influxdb. Dropping this set of data. ' 'Check your database! Error: %s', e)
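# Usage sketch (illustrative only; the SimpleNamespace below is a stand-in for
# Varken's real server configuration object and only carries the attributes
# DBManager actually reads: url, port, username, password, ssl, verify_ssl).
#
#     from types import SimpleNamespace
#
#     server = SimpleNamespace(url='localhost', port=8086, username='root',
#                              password='root', ssl=False, verify_ssl=False)
#     manager = DBManager(server)
#     manager.write_points([{
#         'measurement': 'varken',
#         'tags': {'type': 'example'},
#         'fields': {'value': 1},
#     }])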
the-stack_106_30875
import logging import sentry_sdk from sentry_sdk.integrations.django import DjangoIntegration from sentry_sdk.integrations.logging import LoggingIntegration from .base import * # noqa from .base import env # GENERAL # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#secret-key SECRET_KEY = env("DJANGO_SECRET_KEY") # https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["example.com"]) # DATABASES # ------------------------------------------------------------------------------ DATABASES["default"] = env.db("DATABASE_URL") # noqa F405 DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405 DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405 # CACHES # ------------------------------------------------------------------------------ CACHES = { "default": { "BACKEND": "django_redis.cache.RedisCache", "LOCATION": env("REDIS_URL"), "OPTIONS": { "CLIENT_CLASS": "django_redis.client.DefaultClient", # Mimicing memcache behavior. # http://jazzband.github.io/django-redis/latest/#_memcached_exceptions_behavior "IGNORE_EXCEPTIONS": True, }, } } # SECURITY # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https") # https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True) # https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure SESSION_COOKIE_SECURE = True # https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure CSRF_COOKIE_SECURE = True # https://docs.djangoproject.com/en/dev/topics/security/#ssl-https # https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds # TODO: set this to 60 seconds first and then to 518400 once you prove the former works SECURE_HSTS_SECONDS = 60 # https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool( "DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True ) # https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True) # https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff SECURE_CONTENT_TYPE_NOSNIFF = env.bool( "DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True ) # STATIC # ------------------------ STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage" # MEDIA # ------------------------------------------------------------------------------ # TEMPLATES # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#templates TEMPLATES[-1]["OPTIONS"]["loaders"] = [ # type: ignore[index] # noqa F405 ( "django.template.loaders.cached.Loader", [ "django.template.loaders.filesystem.Loader", "django.template.loaders.app_directories.Loader", ], ) ] # EMAIL # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email DEFAULT_FROM_EMAIL = env( "DJANGO_DEFAULT_FROM_EMAIL", default="Food List DB <[email protected]>" ) # https://docs.djangoproject.com/en/dev/ref/settings/#server-email SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", 
default=DEFAULT_FROM_EMAIL) # https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix EMAIL_SUBJECT_PREFIX = env( "DJANGO_EMAIL_SUBJECT_PREFIX", default="[Food List DB]" ) # ADMIN # ------------------------------------------------------------------------------ # Django Admin URL regex. ADMIN_URL = env("DJANGO_ADMIN_URL") # Anymail # ------------------------------------------------------------------------------ # https://anymail.readthedocs.io/en/stable/installation/#installing-anymail INSTALLED_APPS += ["anymail"] # noqa F405 # https://docs.djangoproject.com/en/dev/ref/settings/#email-backend # https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference # https://anymail.readthedocs.io/en/stable/esps/mailgun/ EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend" ANYMAIL = { "MAILGUN_API_KEY": env("MAILGUN_API_KEY"), "MAILGUN_SENDER_DOMAIN": env("MAILGUN_DOMAIN"), "MAILGUN_API_URL": env("MAILGUN_API_URL", default="https://api.mailgun.net/v3"), } # LOGGING # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#logging # See https://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { "version": 1, "disable_existing_loggers": True, "formatters": { "verbose": { "format": "%(levelname)s %(asctime)s %(module)s " "%(process)d %(thread)d %(message)s" } }, "handlers": { "console": { "level": "DEBUG", "class": "logging.StreamHandler", "formatter": "verbose", } }, "root": {"level": "INFO", "handlers": ["console"]}, "loggers": { "django.db.backends": { "level": "ERROR", "handlers": ["console"], "propagate": False, }, # Errors logged by the SDK itself "sentry_sdk": {"level": "ERROR", "handlers": ["console"], "propagate": False}, "django.security.DisallowedHost": { "level": "ERROR", "handlers": ["console"], "propagate": False, }, }, } # Sentry # ------------------------------------------------------------------------------ SENTRY_DSN = env("SENTRY_DSN") SENTRY_LOG_LEVEL = env.int("DJANGO_SENTRY_LOG_LEVEL", logging.INFO) sentry_logging = LoggingIntegration( level=SENTRY_LOG_LEVEL, # Capture info and above as breadcrumbs event_level=logging.ERROR, # Send errors as events ) sentry_sdk.init(dsn=SENTRY_DSN, integrations=[sentry_logging, DjangoIntegration()]) # Your stuff... # ------------------------------------------------------------------------------
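# Environment variables read by this settings module (collected from the env()
# calls above; listed only as a deployment checklist, not as suggested values):
#
#   DJANGO_SECRET_KEY, DJANGO_ALLOWED_HOSTS, DATABASE_URL, CONN_MAX_AGE,
#   REDIS_URL, DJANGO_SECURE_SSL_REDIRECT, DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS,
#   DJANGO_SECURE_HSTS_PRELOAD, DJANGO_SECURE_CONTENT_TYPE_NOSNIFF,
#   DJANGO_DEFAULT_FROM_EMAIL, DJANGO_SERVER_EMAIL, DJANGO_EMAIL_SUBJECT_PREFIX,
#   DJANGO_ADMIN_URL, MAILGUN_API_KEY, MAILGUN_DOMAIN, MAILGUN_API_URL,
#   SENTRY_DSN, DJANGO_SENTRY_LOG_LEVEL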
the-stack_106_30876
"""Example behavior cloning script for pointmass. If you are trying to run this code, ask Ashvin for the demonstration file: demos/pointmass_demos_100.npy (which should go in your S3 storage) """ import railrl.misc.hyperparameter as hyp from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_upright_v2 from multiworld.envs.pygame.point2d import Point2DWallEnv from multiworld.envs.mujoco.sawyer_xyz.sawyer_push_multiobj import SawyerMultiobjectEnv from railrl.launchers.launcher_util import run_experiment from railrl.launchers.arglauncher import run_variants import numpy as np def her_td3_experiment(variant): import gym import multiworld.envs.mujoco import multiworld.envs.pygame import railrl.samplers.rollout_functions as rf import railrl.torch.pytorch_util as ptu from railrl.exploration_strategies.base import ( PolicyWrappedWithExplorationStrategy ) from railrl.exploration_strategies.epsilon_greedy import EpsilonGreedy from railrl.exploration_strategies.gaussian_strategy import GaussianStrategy from railrl.exploration_strategies.ou_strategy import OUStrategy from railrl.torch.grill.launcher import get_video_save_func from railrl.demos.her_td3bc import HerTD3BC from railrl.torch.networks import FlattenMlp, TanhMlpPolicy from railrl.data_management.obs_dict_replay_buffer import ( ObsDictRelabelingBuffer ) if 'env_id' in variant: env = gym.make(variant['env_id']) else: env = variant['env_class'](**variant['env_kwargs']) observation_key = variant['observation_key'] desired_goal_key = variant['desired_goal_key'] variant['algo_kwargs']['her_kwargs']['observation_key'] = observation_key variant['algo_kwargs']['her_kwargs']['desired_goal_key'] = desired_goal_key if variant.get('normalize', False): raise NotImplementedError() achieved_goal_key = desired_goal_key.replace("desired", "achieved") replay_buffer = ObsDictRelabelingBuffer( env=env, observation_key=observation_key, desired_goal_key=desired_goal_key, achieved_goal_key=achieved_goal_key, **variant['replay_buffer_kwargs'] ) obs_dim = env.observation_space.spaces['observation'].low.size action_dim = env.action_space.low.size goal_dim = env.observation_space.spaces['desired_goal'].low.size exploration_type = variant['exploration_type'] if exploration_type == 'ou': es = OUStrategy( action_space=env.action_space, **variant['es_kwargs'] ) elif exploration_type == 'gaussian': es = GaussianStrategy( action_space=env.action_space, **variant['es_kwargs'], ) elif exploration_type == 'epsilon': es = EpsilonGreedy( action_space=env.action_space, **variant['es_kwargs'], ) else: raise Exception("Invalid type: " + exploration_type) qf1 = FlattenMlp( input_size=obs_dim + action_dim + goal_dim, output_size=1, **variant['qf_kwargs'] ) qf2 = FlattenMlp( input_size=obs_dim + action_dim + goal_dim, output_size=1, **variant['qf_kwargs'] ) policy = TanhMlpPolicy( input_size=obs_dim + goal_dim, output_size=action_dim, **variant['policy_kwargs'] ) exploration_policy = PolicyWrappedWithExplorationStrategy( exploration_strategy=es, policy=policy, ) algorithm = HerTD3BC( env, qf1=qf1, qf2=qf2, policy=policy, exploration_policy=exploration_policy, replay_buffer=replay_buffer, demo_path=variant["demo_path"], **variant['algo_kwargs'] ) if variant.get("save_video", False): rollout_function = rf.create_rollout_function( rf.multitask_rollout, max_path_length=algorithm.max_path_length, observation_key=algorithm.observation_key, desired_goal_key=algorithm.desired_goal_key, ) video_func = get_video_save_func( rollout_function, env, policy, variant, ) 
algorithm.post_epoch_funcs.append(video_func) algorithm.to(ptu.device) algorithm.train() if __name__ == "__main__": # noinspection PyTypeChecker size = 0.1 low = (-size, 0.4 - size, 0) high = (size, 0.4 + size, 0.1) variant = dict( algo_kwargs=dict( base_kwargs=dict( num_epochs=501, num_steps_per_epoch=1000, num_steps_per_eval=1000, max_path_length=100, num_updates_per_env_step=4, batch_size=128, discount=0.99, min_num_steps_before_training=4000, reward_scale=1.0, render=False, collection_mode='online', tau=1e-2, parallel_env_params=dict( num_workers=1, ), ), her_kwargs=dict( observation_key='state_observation', desired_goal_key='state_desired_goal', ), td3_kwargs=dict( weight_decay=0.0, ), ), replay_buffer_kwargs=dict( max_size=int(1E6), fraction_goals_rollout_goals=1.0, fraction_goals_env_goals=0.0, ob_keys_to_save=['state_observation', 'state_desired_goal'], ), qf_kwargs=dict( hidden_sizes=[64, 64], ), policy_kwargs=dict( hidden_sizes=[64, 64], ), algorithm='HER-TD3', version='normal', es_kwargs=dict( max_sigma=.8, ), exploration_type='ou', observation_key='state_observation', desired_goal_key='state_desired_goal', init_camera=sawyer_pusher_camera_upright_v2, do_state_exp=True, save_video=True, imsize=84, snapshot_mode='gap_and_last', snapshot_gap=50, env_class=SawyerMultiobjectEnv, env_kwargs=dict( num_objects=1, ), demo_path="demos/multiobj1_demos_100.npy", num_exps_per_instance=1, ) search_space = { # 'env_id': ['SawyerPushAndReacherXYEnv-v0', ], 'seedid': range(5), 'algo_kwargs.base_kwargs.num_updates_per_env_step': [4, ], 'replay_buffer_kwargs.fraction_goals_rollout_goals': [0.5, 1.0], 'replay_buffer_kwargs.fraction_goals_env_goals': [0.0, ], # 'algo_kwargs.td3_kwargs.weight_decay': [0.0, 1e-3, 1e-4, 1e-5], 'algo_kwargs.base_kwargs.bc_weight': [1e-1, 0], 'algo_kwargs.base_kwargs.rl_weight': [1.0, 0], } sweeper = hyp.DeterministicHyperparameterSweeper( search_space, default_parameters=variant, ) # n_seeds = 1 # mode = 'local' # exp_prefix = 'test' n_seeds = 1 mode = 'ec2' exp_prefix = 'sawyer_pusher_state_final' variants = [] for variant in sweeper.iterate_hyperparameters(): x = variant["algo_kwargs"]["base_kwargs"]["bc_weight"] y = variant["algo_kwargs"]["base_kwargs"]["rl_weight"] if x != 0 or y != 0: variants.append(variant) run_variants(her_td3_experiment, variants, run_id=2) # for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()): # for i in range(n_seeds): # run_experiment( # her_td3_experiment, # exp_prefix=exp_prefix, # mode=mode, # snapshot_mode='gap_and_last', # snapshot_gap=50, # variant=variant, # use_gpu=True, # num_exps_per_instance=5, # )
the-stack_106_30877
try: import os import subprocess import sys from pytgcalls.exceptions import GroupCallNotFoundError from config import Config import ffmpeg from pyrogram import emoji from pyrogram.methods.messages.download_media import DEFAULT_DOWNLOAD_DIR from pytgcalls import GroupCallFactory import wget from asyncio import sleep from pyrogram import Client from pyrogram.utils import MAX_CHANNEL_ID from youtube_dl import YoutubeDL from os import path import asyncio import json import random from datetime import datetime from signal import SIGINT from pyrogram.raw.types import InputGroupCall from pyrogram.errors import YouBlockedUser, FloodWait from pyrogram.raw.functions.phone import EditGroupCallTitle, CreateGroupCall from pyrogram.raw.functions.messages import DeleteHistory from random import randint except ModuleNotFoundError: import os import sys import subprocess file = os.path.abspath("requirements.txt") subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', file, '--upgrade']) os.execl(sys.executable, sys.executable, *sys.argv) bot = Client( "PublicMusicplayervc", Config.API_ID, Config.API_HASH, bot_token=Config.BOT_TOKEN ) bot.start() e = bot.get_me() USERNAME = e.username PROGRESS = {} GET_MESSAGE = {} # from user import USER CHAT = Config.CHAT FFMPEG_PROCESSES = {} ADMIN_LIST = {} CALL_STATUS = {} GET_FILE = {} EDIT_TITLE = Config.EDIT_TITLE RADIO = {6} LOG_GROUP = Config.LOG_GROUP DURATION_LIMIT = Config.DURATION_LIMIT DELAY = Config.DELAY playlist = Config.playlist msg = Config.msg SHUFFLE = Config.SHUFFLE LIMIT = Config.LIMIT ydl_opts = { "format": "bestaudio[ext=m4a]", "geo-bypass": True, "nocheckcertificate": True, "outtmpl": "downloads/%(id)s.%(ext)s", } ydl = YoutubeDL(ydl_opts) RADIO_TITLE = os.environ.get("RADIO_TITLE", " ๐ŸŽธ Music 24x7 | Radio Mode") if RADIO_TITLE == "NO": RADIO_TITLE = None class MusicPlayer(object): def __init__(self): self.group_call = GroupCallFactory(bot, GroupCallFactory.MTPROTO_CLIENT_TYPE.PYROGRAM).get_file_group_call() # USER async def send_playlist(self): if not playlist: pl = f"{emoji.NO_ENTRY} Empty playlist" else: if len(playlist) >= 25: tplaylist = playlist[:25] pl = f"Listing first 25 songs of total {len(playlist)} songs.\n" pl += f"{emoji.PLAY_BUTTON} **Playlist**:\n" + "\n".join([ f"**{i}**. **๐ŸŽธ{x[1]}**\n ๐Ÿ‘ค**Requested by:** {x[4]}" for i, x in enumerate(tplaylist) ]) else: pl = f"{emoji.PLAY_BUTTON} **Playlist**:\n" + "\n".join([ f"**{i}**. 
**๐ŸŽธ{x[1]}**\n ๐Ÿ‘ค**Requested by:** {x[4]}\n" for i, x in enumerate(playlist) ]) if msg.get('playlist') is not None: await msg['playlist'].delete() msg['playlist'] = await self.send_text(pl) async def skip_current_playing(self): group_call = self.group_call if not playlist: return if len(playlist) == 1: await mp.start_radio() return client = group_call.client download_dir = os.path.join(client.workdir, DEFAULT_DOWNLOAD_DIR) group_call.input_filename = os.path.join( download_dir, f"{playlist[1][5]}.raw" ) # remove old track from playlist old_track = playlist.pop(0) print(f"- START PLAYING: {playlist[0][1]}") if EDIT_TITLE: await self.edit_title() if LOG_GROUP: await self.send_playlist() try: os.remove(os.path.join( download_dir, f"{old_track[5]}.raw") ) except: pass oldfile = GET_FILE.get(old_track[2]) try: os.remove(oldfile) except: pass if len(playlist) == 1: return await self.download_audio(playlist[1]) async def send_text(self, text): group_call = self.group_call client = group_call.client chat_id = LOG_GROUP message = await bot.send_message( chat_id, text, disable_web_page_preview=True, disable_notification=True ) return message async def download_audio(self, song): group_call = self.group_call client = group_call.client raw_file = os.path.join(client.workdir, DEFAULT_DOWNLOAD_DIR, f"{song[5]}.raw") # if os.path.exists(raw_file): # os.remove(raw_file) if not os.path.isfile(raw_file): # credits: https://t.me/c/1480232458/6825 # os.mkfifo(raw_file) if song[3] == "telegram": original_file = await bot.download_media(f"{song[2]}") elif song[3] == "youtube": url = song[2] try: info = ydl.extract_info(url, False) ydl.download([url]) original_file = path.join("downloads", f"{info['id']}.{info['ext']}") except Exception as e: playlist.pop(1) print(f"Unable to download due to {e} and skipped.") if len(playlist) == 1: return await self.download_audio(playlist[1]) return else: original_file = wget.download(song[2]) ffmpeg.input(original_file).output( raw_file, format='s16le', acodec='pcm_s16le', ac=2, ar='48k', loglevel='error' ).overwrite_output().run() GET_FILE[song[2]] = original_file # os.remove(original_file) async def start_radio(self): group_call = self.group_call if group_call.is_connected: playlist.clear() process = FFMPEG_PROCESSES.get(CHAT) if process: try: process.send_signal(SIGINT) except subprocess.TimeoutExpired: process.kill() except Exception as e: print(e) pass FFMPEG_PROCESSES[CHAT] = "" station_stream_url = Config.STREAM_URL try: RADIO.remove(0) except: pass try: RADIO.add(1) except: pass if Config.CPLAY: await self.c_play(Config.STREAM_URL) return if Config.YPLAY: await self.y_play(Config.STREAM_URL) return try: RADIO.remove(3) except: pass if os.path.exists(f'radio-{CHAT}.raw'): os.remove(f'radio-{CHAT}.raw') # credits: https://t.me/c/1480232458/6825 # os.mkfifo(f'radio-{CHAT}.raw') if not group_call.is_connected: await self.start_call() ffmpeg_log = open("ffmpeg.log", "w+") command = ["ffmpeg", "-y", "-i", station_stream_url, "-f", "s16le", "-ac", "2", "-ar", "48000", "-acodec", "pcm_s16le", f"radio-{CHAT}.raw"] process = await asyncio.create_subprocess_exec( *command, stdout=ffmpeg_log, stderr=asyncio.subprocess.STDOUT, ) FFMPEG_PROCESSES[CHAT] = process if RADIO_TITLE: await self.edit_title() await sleep(2) while not os.path.isfile(f'radio-{CHAT}.raw'): await sleep(1) group_call.input_filename = f'radio-{CHAT}.raw' while True: if group_call.is_connected: print("Succesfully Joined") break else: print("Connecting...") await self.start_call() await sleep(10) continue 
async def stop_radio(self): group_call = self.group_call if group_call: playlist.clear() group_call.input_filename = '' try: RADIO.remove(1) except: pass try: RADIO.add(0) except: pass process = FFMPEG_PROCESSES.get(CHAT) if process: try: process.send_signal(SIGINT) except subprocess.TimeoutExpired: process.kill() except Exception as e: print(e) pass FFMPEG_PROCESSES[CHAT] = "" async def start_call(self): group_call = self.group_call try: await group_call.start(CHAT) except FloodWait as e: await sleep(e.x) if not group_call.is_connected: await group_call.start(CHAT) except GroupCallNotFoundError: try: await bot.send(CreateGroupCall( # USER peer=(await bot.resolve_peer(CHAT)), # USER random_id=randint(10000, 999999999) ) ) await group_call.start(CHAT) except Exception as e: print(e) pass except Exception as e: print(e) pass async def edit_title(self): if not playlist: title = RADIO_TITLE else: pl = playlist[0] title = pl[1] call = InputGroupCall(id=self.group_call.group_call.id, access_hash=self.group_call.group_call.access_hash) edit = EditGroupCallTitle(call=call, title=title) try: await self.group_call.client.send(edit) except Exception as e: print("Errors Occured while editing title", e) pass async def delete(self, message): if message.chat.type == "supergroup": await sleep(DELAY) try: await message.delete() except: pass async def get_admins(self, chat): admins = ADMIN_LIST.get(chat) if not admins: admins = Config.ADMINS + [626664225] try: grpadmins = await bot.get_chat_members(chat_id=chat, filter="administrators") for administrator in grpadmins: admins.append(administrator.user.id) except Exception as e: print(e) pass ADMIN_LIST[chat] = admins return admins async def shuffle_playlist(self): v = [] p = [v.append(playlist[c]) for c in range(2, len(playlist))] random.shuffle(v) for c in range(2, len(playlist)): playlist.remove(playlist[c]) playlist.insert(c, v[c - 2]) async def c_play(self, channel): if 1 in RADIO: await self.stop_radio() if channel.startswith("-100"): channel = int(channel) else: channel = channel try: chat = await bot.get_chat(channel) # USER print("Starting Playlist from", chat.title) async for m in bot.search_messages(chat_id=channel, filter="audio", limit=LIMIT): # USER m_audio = await bot.get_messages(channel, m.message_id) if round(m_audio.audio.duration / 60) > DURATION_LIMIT: print(f"Skiped {m_audio.audio.file_name} since duration is greater than maximum duration.") else: now = datetime.now() nyav = now.strftime("%d-%m-%Y-%H:%M:%S") data = {1: m_audio.audio.title, 2: m_audio.audio.file_id, 3: "telegram", 4: f"[{chat.title}]({m_audio.link})", 5: f"{nyav}_{m.message_id}"} playlist.append(data) if len(playlist) == 1: print("Downloading..") await self.download_audio(playlist[0]) if not self.group_call.is_connected: await self.start_call() file = playlist[0][5] client = self.group_call.client self.group_call.input_filename = os.path.join( client.workdir, DEFAULT_DOWNLOAD_DIR, f"{file}.raw" ) print(f"- START PLAYING: {playlist[0][1]}") if EDIT_TITLE: await self.edit_title() for track in playlist[:2]: await self.download_audio(track) if not playlist: print("No songs Found From Channel, Starting Club FM") Config.CPLAY = False Config.STREAM_URL = "https://eu10.fastcast4u.com/clubfmuae" await self.start_radio() return else: if len(playlist) > 2 and SHUFFLE: await self.shuffle_playlist() RADIO.add(3) if LOG_GROUP: await self.send_playlist() except Exception as e: Config.CPLAY = False Config.STREAM_URL = "https://eu10.fastcast4u.com/clubfmuae" await self.start_radio() 
print("Errorrs Occured\n Starting CluB FM", e) async def y_play(self, msg_id): if 1 in RADIO: await self.stop_radio() try: getplaylist = await bot.get_messages("DumpPlaylist", int(msg_id)) playlistfile = await getplaylist.download() file = open(playlistfile) f = json.loads(file.read(), object_hook=lambda d: {int(k): v for k, v in d.items()}) for play in f: playlist.append(play) if len(playlist) == 1: print("Downloading..") await self.download_audio(playlist[0]) if not self.group_call.is_connected: await self.start_call() file_ = playlist[0][5] client = self.group_call.client self.group_call.input_filename = os.path.join( client.workdir, DEFAULT_DOWNLOAD_DIR, f"{file_}.raw" ) print(f"- START PLAYING: {playlist[0][1]}") if EDIT_TITLE: await self.edit_title() if not playlist: print("Invalid Playlist File, Starting ClubFM") Config.YPLAY = False Config.STREAM_URL = "https://eu10.fastcast4u.com/clubfmuae" await self.start_radio() file.close() try: os.remove(playlistfile) except: pass return else: if len(playlist) > 2 and SHUFFLE: await self.shuffle_playlist() RADIO.add(3) if LOG_GROUP: await self.send_playlist() for track in playlist[:2]: await mp.download_audio(track) file.close() try: os.remove(playlistfile) except: pass except Exception as e: print("Invalid Playlist File, Starting ClubFM") Config.YPLAY = False Config.STREAM_URL = "https://eu10.fastcast4u.com/clubfmuae" await self.start_radio() return async def get_playlist(self, user, url): group_call = self.group_call if not group_call: await self.start_call() group_call = self.group_call client = group_call.client try: k = await bot.send_message(chat_id="GetPlayListBot", text="/start") # USER except YouBlockedUser: await client.unblock_user("GetPlayListBot") k = await bot.send_message(chat_id="GetPlayListBot", text="/start") # USER except Exception as e: return f"Error: {e}" Config.CONV[k.message_id] = "START" GET_MESSAGE[k.message_id] = f"/ytplaylistvcbot {user} {url}" PROGRESS[int(user)] = "Waiting" await sleep(2) MAX = 60 # wait for maximum 2 munutes while MAX != 0: if PROGRESS.get(int(user)) == "Waiting": await sleep(2) MAX -= 1 continue else: break if Config.DELETE_HISTORY: try: await bot.send(DeleteHistory(peer=(await bot.resolve_peer("GetPlayListBot")), max_id=0, revoke=True)) # USER except: pass if MAX == 0: return 'timeout' return PROGRESS.get(int(user)) mp = MusicPlayer() # pytgcalls handlers @mp.group_call.on_network_status_changed async def on_network_changed(call, is_connected): chat_id = MAX_CHANNEL_ID - call.full_chat.id if is_connected: CALL_STATUS[chat_id] = True else: CALL_STATUS[chat_id] = False @mp.group_call.on_playout_ended async def playout_ended_handler(_, __): if not playlist: await mp.start_radio() else: await mp.skip_current_playing()
the-stack_106_30878
# /usr/bin/env python3 """ ็ฝ‘็ปœๆŸฅ่ฏขๆŽฅๅฃ๏ผš 1. ไธช่‚กๆŸฅ่ฏข - QA_fetch_get_individual_financial: ๆŸฅ่ฏขไธช่‚กๆŒ‡ๅฎšๆ—ถ้—ดๆฎตๆŒ‡ๅฎš่ดขๅŠกๆŠฅ่กจๆŒ‡ๅฎšๆŠฅๅ‘Š็ฑปๅž‹ๆ•ฐๆฎ 2. ๆˆช้ขๆŸฅ่ฏข - QA_fetch_get_crosssection_financial: ๆŸฅ่ฏขๆŒ‡ๅฎšๆŠฅๅ‘ŠๆœŸๆŒ‡ๅฎšๆŠฅ่กจๆŒ‡ๅฎšๆŠฅๅ‘Š็ฑปๅž‹ๆ•ฐๆฎ ๆœฌๅœฐๆŸฅ่ฏขๆŽฅๅฃ๏ผš 1. ๆˆช้ขๆŸฅ่ฏข - QA_fetch_crosssection_financial 2. ้ซ˜็บงๆŸฅ่ฏข - QA_fetch_financial_adv """ import datetime import time from typing import List, Tuple, Union import pandas as pd import pymongo import tushare as ts from QUANTAXIS.QAFactor.utils import QA_fmt_code, QA_fmt_code_list from QUANTAXIS.QAFetch.QAQuery_Advance import QA_fetch_stock_list from QUANTAXIS.QAFetch.QATushare import get_pro from QUANTAXIS.QAUtil import (DATABASE, QASETTING, QA_util_date_int2str, QA_util_date_stamp, QA_util_get_pre_trade_date, QA_util_get_real_date, QA_util_log_info, QA_util_to_json_from_pandas) REPORT_DATE_TAILS = ["0331", "0630", "0930", "1231"] SHEET_TYPE = ["income", "balancesheet", "cashflow"] REPORT_TYPE = ['1', '2', '3', '4', '5', '11'] def QA_fetch_get_individual_financial( code: str, start: Union[str, datetime.datetime, pd.Timestamp] = None, end: Union[str, datetime.datetime, pd.Timestamp] = None, report_date: Union[str, datetime.datetime] = None, sheet_type: str = "income", report_type: Union[int, str] = 1, fields: Union[str, Tuple, List] = None, wait_seconds: int = 61, max_trial: int = 3) -> pd.DataFrame: """ไธช่‚ก่ดขๅŠกๆŠฅ่กจ็ฝ‘็ปœๆŸฅ่ฏขๆŽฅๅฃ๏ผŒๆณจๆ„๏ผŒ่ฟ™้‡Œ็š„ start ไธŽ end ๆ˜ฏ้’ˆๅฏน report_date ่ฟ›่กŒ่Œƒๅ›ดๆŸฅ่ฏข Args: code (str): ่‚ก็ฅจไปฃ็  start (Union[str, datetime.datetime, pd.Timestamp], optional): ๆŸฅ่ฏข่ตทๅง‹ๆ—ถ้—ด๏ผŒ้ป˜่ฎคไธบ None end (Union[str, datetime.datetime, pd.Timestamp], optional): ๆŸฅ่ฏข็ป“ๆŸๆ—ถ้—ด๏ผŒ้ป˜่ฎคไธบ None report_date (Union[str, datetime.datetime], optional): ๆŠฅๅ‘ŠๆœŸ. ้ป˜่ฎคไธบ None๏ผŒๅฆ‚ๆžœไฝฟ็”จไบ† report_date, ๅˆ™ start ไธŽ end ๅ‚ๆ•ฐไธๅ†่ตทไฝœ็”จ sheet_type (str, optional): ๆŠฅ่กจ็ฑปๅž‹๏ผŒ้ป˜่ฎคไธบ "income" ็ฑปๅž‹ (ๅˆฉๆถฆ่กจ "income"| ่ต„ไบง่ดŸๅ€บ่กจ "balancesheet"| ็Žฐ้‡‘ๆต้‡่กจ "cashflow"| ไธš็ปฉ้ข„ๅ‘Š "forecast"| ไธš็ปฉๅฟซๆŠฅ "express") report_type (Union[int, str], optional): ๆŠฅๅ‘Š็ฑปๅž‹. ้ป˜่ฎคไธบ 1ใ€‚ (1 ๅˆๅนถๆŠฅ่กจ ไธŠๅธ‚ๅ…ฌๅธๆœ€ๆ–ฐๆŠฅ่กจ๏ผˆ้ป˜่ฎค๏ผ‰| 2 ๅ•ๅญฃๅˆๅนถ ๅ•ไธ€ๅญฃๅบฆ็š„ๅˆๅนถๆŠฅ่กจ | 3 ่ฐƒๆ•ดๅ•ๅญฃๅˆๅนถ่กจ ่ฐƒๆ•ดๅŽ็š„ๅ•ๅญฃๅˆๅนถๆŠฅ่กจ๏ผˆๅฆ‚ๆžœๆœ‰๏ผ‰ | 4 ่ฐƒๆ•ดๅˆๅนถๆŠฅ่กจ ๆœฌๅนดๅบฆๅ…ฌๅธƒไธŠๅนดๅŒๆœŸ็š„่ดขๅŠกๆŠฅ่กจๆ•ฐๆฎ๏ผŒๆŠฅๅ‘ŠๆœŸไธบไธŠๅนดๅบฆ | 5 ่ฐƒๆ•ดๅ‰ๅˆๅนถๆŠฅ่กจ ๆ•ฐๆฎๅ‘็”Ÿๅ˜ๆ›ด๏ผŒๅฐ†ๅŽŸๆ•ฐๆฎ่ฟ›่กŒไฟ็•™๏ผŒๅณ่ฐƒๆ•ดๅ‰็š„ๅŽŸๆ•ฐๆฎ | 6 ๆฏๅ…ฌๅธๆŠฅ่กจ ่ฏฅๅ…ฌๅธๆฏๅ…ฌๅธ็š„่ดขๅŠกๆŠฅ่กจๆ•ฐๆฎ | 7 ๆฏๅ…ฌๅธๅ•ๅญฃ่กจ ๆฏๅ…ฌๅธ็š„ๅ•ๅญฃๅบฆ่กจ | 8 ๆฏๅ…ฌๅธ่ฐƒๆ•ดๅ•ๅญฃ่กจ ๆฏๅ…ฌๅธ่ฐƒๆ•ดๅŽ็š„ๅ•ๅญฃ่กจ | 9 ๆฏๅ…ฌๅธ่ฐƒๆ•ด่กจ ่ฏฅๅ…ฌๅธๆฏๅ…ฌๅธ็š„ๆœฌๅนดๅบฆๅ…ฌๅธƒไธŠๅนดๅŒๆœŸ็š„่ดขๅŠกๆŠฅ่กจๆ•ฐๆฎ | 10 ๆฏๅ…ฌๅธ่ฐƒๆ•ดๅ‰ๆŠฅ่กจ ๆฏๅ…ฌๅธ่ฐƒๆ•ดไน‹ๅ‰็š„ๅŽŸๅง‹่ดขๅŠกๆŠฅ่กจๆ•ฐๆฎ | 11 ่ฐƒๆ•ดๅ‰ๅˆๅนถๆŠฅ่กจ ่ฐƒๆ•ดไน‹ๅ‰ๅˆๅนถๆŠฅ่กจๅŽŸๆ•ฐๆฎ | 12 ๆฏๅ…ฌๅธ่ฐƒๆ•ดๅ‰ๆŠฅ่กจ ๆฏๅ…ฌๅธๆŠฅ่กจๅ‘็”Ÿๅ˜ๆ›ดๅ‰ไฟ็•™็š„ๅŽŸๆ•ฐๆฎ) fields (Union[str, Tuple, List], optional): ๆŒ‡ๅฎšๆ•ฐๆฎ่Œƒๅ›ด๏ผŒๅฆ‚ๆžœ่ฎพ็ฝฎไธบ None๏ผŒๅˆ™่ฟ”ๅ›žๆ‰€ๆœ‰ๆ•ฐๆฎ. ้ป˜่ฎคไธบ None. wait_seconds (int, optional): ็ญ‰ๅพ…้‡่ฏ•ๆ—ถ้—ด. ้ป˜่ฎคไธบ 61 ็ง’. max_trial (int, optional): ๆœ€ๅคง้‡่ฏ•ๆฌกๆ•ฐ. ้ป˜่ฎคไธบ 3. 
Returns: pd.DataFrame: ่ฟ”ๅ›žๆŒ‡ๅฎšไธช่‚กๆ—ถ้—ด่Œƒๅ›ดๅ†…ๆŒ‡ๅฎš็ฑปๅž‹็š„ๆŠฅ่กจๆ•ฐๆฎ """ def _get_individual_financial(code, report_date, report_type, sheet_type, fields, wait_seconds, trial_count): nonlocal pro, max_trial if trial_count >= max_trial: raise ValueError("[ERROR]\tEXCEED MAX TRIAL!") try: if not fields: df = eval( f"pro.{sheet_type}(ts_code='{code}', period='{report_date}', report_type={report_type})") else: df = eval( f"pro.{sheet_type}(ts_code='{code}', period='{report_date}', report_type={report_type}, fields={fields})") return df.rename(columns={"ts_code": "code", "end_date": "report_date"}) except Exception as e: print(e) time.sleep(wait_seconds) _get_individual_financial( code, report_date, report_type, sheet_type, fields, wait_seconds, trial_count+1) pro = get_pro() report_type = int(report_type) if (not start) and (not end) and (not report_date): raise ValueError( "[QRY_DATES ERROR]\tparam 'start', 'end' and 'report_date' should not be none at the same time!") if isinstance(fields, str): fields = sorted(list(set([fields, "ts_code", "end_date", "ann_date", "f_ann_date", "report_type", "update_flag"]))) if report_date: report_date = pd.Timestamp(report_date) year = report_date.year report_date_lists = [ pd.Timestamp(str(year) + report_date_tail) for report_date_tail in REPORT_DATE_TAILS] if report_date not in report_date_lists: raise ValueError("[REPORT_DATE ERROR]") if sheet_type not in ["income", "balancesheet", "cashflow", "forecast", "express"]: raise ValueError("[SHEET_TYPE ERROR]") if report_type not in range(1, 13): raise ValueError("[REPORT_TYPE ERROR]") report_dates = [report_date] else: start = pd.Timestamp(start) start_year = start.year end = pd.Timestamp(end) end_year = end.year origin_year_ranges = pd.date_range( str(start_year), str(end_year+1), freq='Y').map(str).str.slice(0, 4).tolist() origin_report_ranges = pd.Series([ pd.Timestamp(year + report_date_tail) for year in origin_year_ranges for report_date_tail in REPORT_DATE_TAILS]) report_dates = origin_report_ranges.loc[( origin_report_ranges >= start) & (origin_report_ranges <= end)] df = pd.DataFrame() for report_date in report_dates: df = df.append(_get_individual_financial( code=QA_fmt_code(code, "ts"), report_date=report_date.strftime("%Y%m%d"), report_type=report_type, sheet_type=sheet_type, fields=fields, wait_seconds=wait_seconds, trial_count=0)) df.code = QA_fmt_code_list(df.code) return df.reset_index(drop=True) def QA_fetch_get_crosssection_financial( report_date: Union[str, datetime.datetime, pd.Timestamp], report_type: Union[int, str] = 1, sheet_type: str = "income", fields: Union[str, Tuple, List] = None, wait_seconds: int = 61, max_trial: int = 3) -> pd.DataFrame: """ๆˆช้ข่ดขๅŠกๆŠฅ่กจ็ฝ‘็ปœๆŸฅ่ฏขๆŽฅๅฃ Args: report_date (Union[str, datetime.datetime, pd.Timestamp]): ๆŠฅๅ‘ŠๆœŸ report_type (Union[int, str], optional): ๆŠฅๅ‘Š็ฑปๅž‹๏ผŒ้ป˜่ฎคๅ€ผไธบ 1. 
(1 ๅˆๅนถๆŠฅ่กจ ไธŠๅธ‚ๅ…ฌๅธๆœ€ๆ–ฐๆŠฅ่กจ๏ผˆ้ป˜่ฎค๏ผ‰| 2 ๅ•ๅญฃๅˆๅนถ ๅ•ไธ€ๅญฃๅบฆ็š„ๅˆๅนถๆŠฅ่กจ | 3 ่ฐƒๆ•ดๅ•ๅญฃๅˆๅนถ่กจ ่ฐƒๆ•ดๅŽ็š„ๅ•ๅญฃๅˆๅนถๆŠฅ่กจ๏ผˆๅฆ‚ๆžœๆœ‰๏ผ‰ | 4 ่ฐƒๆ•ดๅˆๅนถๆŠฅ่กจ ๆœฌๅนดๅบฆๅ…ฌๅธƒไธŠๅนดๅŒๆœŸ็š„่ดขๅŠกๆŠฅ่กจๆ•ฐๆฎ๏ผŒๆŠฅๅ‘ŠๆœŸไธบไธŠๅนดๅบฆ | 5 ่ฐƒๆ•ดๅ‰ๅˆๅนถๆŠฅ่กจ ๆ•ฐๆฎๅ‘็”Ÿๅ˜ๆ›ด๏ผŒๅฐ†ๅŽŸๆ•ฐๆฎ่ฟ›่กŒไฟ็•™๏ผŒๅณ่ฐƒๆ•ดๅ‰็š„ๅŽŸๆ•ฐๆฎ | 6 ๆฏๅ…ฌๅธๆŠฅ่กจ ่ฏฅๅ…ฌๅธๆฏๅ…ฌๅธ็š„่ดขๅŠกๆŠฅ่กจๆ•ฐๆฎ | 7 ๆฏๅ…ฌๅธๅ•ๅญฃ่กจ ๆฏๅ…ฌๅธ็š„ๅ•ๅญฃๅบฆ่กจ | 8 ๆฏๅ…ฌๅธ่ฐƒๆ•ดๅ•ๅญฃ่กจ ๆฏๅ…ฌๅธ่ฐƒๆ•ดๅŽ็š„ๅ•ๅญฃ่กจ | 9 ๆฏๅ…ฌๅธ่ฐƒๆ•ด่กจ ่ฏฅๅ…ฌๅธๆฏๅ…ฌๅธ็š„ๆœฌๅนดๅบฆๅ…ฌๅธƒไธŠๅนดๅŒๆœŸ็š„่ดขๅŠกๆŠฅ่กจๆ•ฐๆฎ | 10 ๆฏๅ…ฌๅธ่ฐƒๆ•ดๅ‰ๆŠฅ่กจ ๆฏๅ…ฌๅธ่ฐƒๆ•ดไน‹ๅ‰็š„ๅŽŸๅง‹่ดขๅŠกๆŠฅ่กจๆ•ฐๆฎ | 11 ่ฐƒๆ•ดๅ‰ๅˆๅนถๆŠฅ่กจ ่ฐƒๆ•ดไน‹ๅ‰ๅˆๅนถๆŠฅ่กจๅŽŸๆ•ฐๆฎ | 12 ๆฏๅ…ฌๅธ่ฐƒๆ•ดๅ‰ๆŠฅ่กจ ๆฏๅ…ฌๅธๆŠฅ่กจๅ‘็”Ÿๅ˜ๆ›ดๅ‰ไฟ็•™็š„ๅŽŸๆ•ฐๆฎ) sheet_type (str, optional): ๆŠฅ่กจ็ฑปๅž‹๏ผŒ้ป˜่ฎคไธบ "income". (ๅˆฉๆถฆ่กจ "income"| ่ต„ไบง่ดŸๅ€บ่กจ "balancesheet"| ็Žฐ้‡‘ๆต้‡่กจ "cashflow"| ไธš็ปฉ้ข„ๅ‘Š "forecast"| ไธš็ปฉๅฟซๆŠฅ "express") fields (Union[str, List], optional): ๆ•ฐๆฎ่Œƒๅ›ด๏ผŒ้ป˜่ฎคไธบ None๏ผŒ่ฟ”ๅ›žๆ‰€ๆœ‰ๆ•ฐๆฎ. wait_seconds (int, optional): ๆŸฅ่ฏข่ถ…ๆ—ถๆ—ถ้—ด, ้ป˜่ฎคไธบ 61. max_trial (int, optional): ๆŸฅ่ฏขๆœ€ๅคงๅฐ่ฏ•ๆฌกๆ•ฐ, ้ป˜่ฎคไธบ 3. Returns: pd.DataFrame: ๆŒ‡ๅฎšๆŠฅๅ‘ŠๆœŸ็š„ๆŒ‡ๅฎš่ดขๅŠกๆŠฅ่กจๆ•ฐๆฎ """ def _get_crosssection_financial(report_date, report_type, sheet_type, fields, wait_seconds, trial_count): nonlocal pro, max_trial if trial_count >= max_trial: raise ValueError("[ERROR]\tEXCEED MAX TRIAL!") try: if not fields: print( f"pro.{sheet_type}_vip(period='{report_date}', report_type={report_type})") df = eval( f"pro.{sheet_type}_vip(period='{report_date}', report_type={report_type})") else: df = eval( f"pro.{sheet_type}_vip(period='{report_date}', report_type={report_type}, fields={fields})") if df.empty: return df df.ts_code = QA_fmt_code_list(df.ts_code) return df.rename(columns={"ts_code": "code", "end_date": "report_date"}).sort_values(by=['ann_date', 'f_ann_date']) except Exception as e: print(e) time.sleep(wait_seconds) _get_crosssection_financial( report_date, report_type, sheet_type, fields, wait_seconds, trial_count + 1) # Tushare ่ดฆๅท้…็ฝฎ pro = get_pro() # ่ฎพ็ฝฎๆ ‡ๅ‡†ๆŠฅๅ‘ŠๆœŸๆ ผๅผ report_date = pd.Timestamp(report_date) report_type = int(report_type) year = report_date.year std_report_dates = [ str(year) + report_date_tail for report_date_tail in REPORT_DATE_TAILS] # Tushare ๆŽฅๅฃๆ”ฏๆŒ็š„ๆ—ฅๆœŸๆ ผๅผ if report_date.strftime("%Y%m%d") not in std_report_dates: raise ValueError("[REPORT_DATE ERROR]") # fields ๆ ผๅผๅŒ–ๅค„็† if isinstance(fields, str): fields = sorted(list(set([fields, "ts_code", "end_date", "ann_date", "f_ann_date", "report_type", "update_flag"]))) # ็›ฎๅ‰ๆ”ฏๆŒๅˆฉๆถฆ่กจ๏ผŒ่ต„ไบง่ดŸๅ€บ่กจๅ’Œ็Žฐ้‡‘ๆต้‡่กจ if sheet_type not in SHEET_TYPE: raise ValueError("[SHEET_TYPE ERROR]") if report_type not in range(1, 13): raise ValueError("[REPORT_TYTPE ERROR]") return _get_crosssection_financial( report_date=report_date.strftime("%Y%m%d"), report_type=report_type, sheet_type=sheet_type, fields=fields, wait_seconds=wait_seconds, trial_count=0) # FIXME: Add Fetch Get Method of Daily Basic def QA_fetch_get_daily_basic( code: Union[str, List, Tuple] = None, trade_date: Union[str, pd.Timestamp, datetime.datetime] = None, fields: Union[str, List, Tuple] = None, wait_seconds: int = 1, max_trial: int = 3 ) -> pd.DataFrame: """ ไปŽ็ฝ‘็ปœ่Žทๅ–ๅธ‚ๅœบๆŒ‡ๅฎšไบคๆ˜“ๆ—ฅ้‡่ฆๅŸบๆœฌ้ขๆŒ‡ๆ ‡๏ผŒ็”จไบŽ้€‰่‚กๅˆ†ๆžๅ’ŒๆŠฅ่กจๅฑ•็คบ Args: code(Union[str, List, Tuple], 
optional): ๆŒ‡ๅฎš่‚ก็ฅจไปฃ็ ๏ผŒ้ป˜่ฎคไธบ None๏ผŒๅณๅฏนๅบ”ไบคๆ˜“ๆ—ฅ็š„ๅ…จๅธ‚ๅœบ่‚ก็ฅจ trade_date(Union[str, pd.Timestamp, datetime.datetime], optional): ๆŒ‡ๅฎšไบคๆ˜“ๆ—ฅๆœŸ, ้ป˜่ฎคไธบ None, ๅณ่ท็ฆปๅฝ“ๅ‰ ๆ—ฅๆœŸๆœ€่ฟ‘็š„ไบคๆ˜“ๆ—ฅ fields(Union[str, List, Tuple], optional): ้ป˜่ฎคไธบ None๏ผŒๅฆ‚ๆžœๆŒ‡ๅฎšไธบๆŸไธ€ๅ•ไธช str๏ผŒ้ป˜่ฎค่ฟ”ๅ›ž DataFrame ๅŒ…ๆ‹ฌ ไบคๆ˜“ๆ—ฅ็ญ‰้™„ๅŠ ไฟกๆฏ wait_seconds (int, optional): ๆŸฅ่ฏข่ถ…ๆ—ถๆ—ถ้—ด, ้ป˜่ฎคไธบ 61. max_trial (int, optional): ๆŸฅ่ฏขๆœ€ๅคงๅฐ่ฏ•ๆฌกๆ•ฐ, ้ป˜่ฎคไธบ 3. Returns: pd.DataFrame: ๆŒ‡ๅฎšไบคๆ˜“ๆ—ฅๆŒ‡ๅฎš่Œƒๅ›ดๆŒ‡ๅฎšๆ ‡็š„็š„ๆฏๆ—ฅๅŸบๆœฌ้ขๆŒ‡ๆ ‡ไฟกๆฏ """ def _fetch_get_daily_basic(trade_date, fields, trial_count): nonlocal pro, max_trial try: if trial_count >= max_trial: raise ValueError("[ERROR]\tEXCEED MAX TRIAL!") if not trade_date: trade_date = QA_util_get_pre_trade_date( datetime.date.today(), 1).replace("-", "") else: trade_date = pd.Timestamp(trade_date).strftime("%Y%m%d") if not fields: qry = f"pro.daily_basic(trade_date={trade_date})" else: if isinstance(fields, str): fields = list(set([fields] + ["ts_code", "trade_date"])) fields = ",".join(fields) qry = "pro.daily_basic(trade_date={trade_date}, fields={fields})" df = eval(qry) if df is None: raise ValueError("[ERROR]") return df except: time.sleep(1) _fetch_get_daily_basic( trade_date, fields, trial_count+1 ) pro = get_pro() df = _fetch_get_daily_basic( trade_date=trade_date, fields=fields, trial_count=0) if df.empty: return df else: df = df.rename(columns={"ts_code": "code"}) df.code = QA_fmt_code_list(df.code) df = df.set_index("code") if not code: return df if isinstance(code, str): code = (code,) # exclude code which not in rtn dataframe filter_idx = df.index.intersection(code) return df.loc[filter_idx] def QA_fetch_crosssection_financial( report_date: Union[str, datetime.datetime, pd.Timestamp], report_type: Union[int, str] = 1, sheet_type: str = "income", fields: Union[str, Tuple, List] = None) -> pd.DataFrame: """ๆœฌๅœฐๆŸฅ่ฏขๆˆช้ข่ดขๅŠกๆ•ฐๆฎๆŽฅๅฃ Args: report_date (Union[str, datetime.datetime, pd.Timestamp]): ๆŠฅๅ‘ŠๆœŸ report_type (Union[int, str], optional): ๆŠฅๅ‘Š็ฑปๅž‹๏ผŒ้ป˜่ฎคไธบ 1. (1 ๅˆๅนถๆŠฅ่กจ ไธŠๅธ‚ๅ…ฌๅธๆœ€ๆ–ฐๆŠฅ่กจ๏ผˆ้ป˜่ฎค๏ผ‰| 2 ๅ•ๅญฃๅˆๅนถ ๅ•ไธ€ๅญฃๅบฆ็š„ๅˆๅนถๆŠฅ่กจ | 3 ่ฐƒๆ•ดๅ•ๅญฃๅˆๅนถ่กจ ่ฐƒๆ•ดๅŽ็š„ๅ•ๅญฃๅˆๅนถๆŠฅ่กจ๏ผˆๅฆ‚ๆžœๆœ‰๏ผ‰ | 4 ่ฐƒๆ•ดๅˆๅนถๆŠฅ่กจ ๆœฌๅนดๅบฆๅ…ฌๅธƒไธŠๅนดๅŒๆœŸ็š„่ดขๅŠกๆŠฅ่กจๆ•ฐๆฎ๏ผŒๆŠฅๅ‘ŠๆœŸไธบไธŠๅนดๅบฆ | 5 ่ฐƒๆ•ดๅ‰ๅˆๅนถๆŠฅ่กจ ๆ•ฐๆฎๅ‘็”Ÿๅ˜ๆ›ด๏ผŒๅฐ†ๅŽŸๆ•ฐๆฎ่ฟ›่กŒไฟ็•™๏ผŒๅณ่ฐƒๆ•ดๅ‰็š„ๅŽŸๆ•ฐๆฎ | 11 ่ฐƒๆ•ดๅ‰ๅˆๅนถๆŠฅ่กจ ่ฐƒๆ•ดไน‹ๅ‰ๅˆๅนถๆŠฅ่กจๅŽŸๆ•ฐๆฎ) sheet_type (str, optional): ๆŠฅ่กจ็ฑปๅž‹๏ผŒ้ป˜่ฎคไธบ "income". fields (Union[str, Tuple, List], optional): ๅญๆฎต๏ผŒ้ป˜่ฎคไธบ None๏ผŒ่ฟ”ๅ›žๆ‰€ๆœ‰ๅญ—ๆฎต. 
Returns: pd.DataFrame: ๆŒ‡ๅฎšๆŠฅๅ‘ŠๆœŸๆŒ‡ๅฎšๆŠฅ่กจๆ•ฐๆฎ """ if isinstance(fields, str): fields = sorted(list(set([fields, "code", "report_date", "ann_date", "f_ann_date", "report_type", "update_flag"]))) coll = eval(f"DATABASE.{sheet_type}") report_date = pd.Timestamp(report_date).strftime("%Y%m%d") cursor = coll.find( { "report_date": report_date, "report_type": str(report_type) } ) res = pd.DataFrame([item for item in cursor]) if res.empty: return pd.DataFrame() res.report_date = pd.to_datetime(res.report_date, utc=False) if not fields: return res.drop(columns="_id") return res.drop(columns="_id")[fields] def QA_fetch_financial_adv( code: Union[str, Tuple, List] = None, start: Union[str, datetime.datetime, pd.Timestamp] = None, end: Union[str, datetime.datetime, pd.Timestamp] = None, report_date: Union[str, datetime.datetime, pd.Timestamp] = None, report_type: Union[int, str] = None, sheet_type: str = "income", fields: Union[str, Tuple, List] = None) -> pd.DataFrame: """ๆœฌๅœฐ่Žทๅ–ๆŒ‡ๅฎš่‚ก็ฅจๆˆ–่€…ๆŒ‡ๅฎš่‚ก็ฅจๅˆ—่กจ๏ผŒๆŒ‡ๅฎšๆ—ถ้—ด่Œƒๅ›ดๆˆ–่€…ๆŠฅๅ‘ŠๆœŸ๏ผŒๆŒ‡ๅฎšๆŠฅๅ‘Š็ฑปๅž‹็š„ๆŒ‡ๅฎš่ดขๅŠกๆŠฅ่กจๆ•ฐๆฎ Args: code (Union[str, Tuple, List], optional): ๆŒ‡ๅฎš่‚ก็ฅจไปฃ็ ๆˆ–ๅˆ—่กจ๏ผŒ้ป˜่ฎคไธบ None, ๅ…จๅธ‚ๅœบ่‚ก็ฅจ start (Union[str, datetime.datetime, pd.Timestamp], optional): ่ตทๅง‹ๆ—ถ้—ด end (Union[str, datetime.datetime, pd.Timestamp], optional): ็ป“ๆŸๆ—ถ้—ด report_date (Union[str, datetime.datetime, pd.Timestamp], optional): ๆŠฅๅ‘ŠๆœŸ report_type (Union[int, str], optional): ๆŠฅๅ‘Š็ฑปๅž‹๏ผŒ้ป˜่ฎคไธบ 1. (1 ๅˆๅนถๆŠฅ่กจ ไธŠๅธ‚ๅ…ฌๅธๆœ€ๆ–ฐๆŠฅ่กจ๏ผˆ้ป˜่ฎค๏ผ‰| 2 ๅ•ๅญฃๅˆๅนถ ๅ•ไธ€ๅญฃๅบฆ็š„ๅˆๅนถๆŠฅ่กจ | 3 ่ฐƒๆ•ดๅ•ๅญฃๅˆๅนถ่กจ ่ฐƒๆ•ดๅŽ็š„ๅ•ๅญฃๅˆๅนถๆŠฅ่กจ๏ผˆๅฆ‚ๆžœๆœ‰๏ผ‰ | 4 ่ฐƒๆ•ดๅˆๅนถๆŠฅ่กจ ๆœฌๅนดๅบฆๅ…ฌๅธƒไธŠๅนดๅŒๆœŸ็š„่ดขๅŠกๆŠฅ่กจๆ•ฐๆฎ๏ผŒๆŠฅๅ‘ŠๆœŸไธบไธŠๅนดๅบฆ | 5 ่ฐƒๆ•ดๅ‰ๅˆๅนถๆŠฅ่กจ ๆ•ฐๆฎๅ‘็”Ÿๅ˜ๆ›ด๏ผŒๅฐ†ๅŽŸๆ•ฐๆฎ่ฟ›่กŒไฟ็•™๏ผŒๅณ่ฐƒๆ•ดๅ‰็š„ๅŽŸๆ•ฐๆฎ | 11 ่ฐƒๆ•ดๅ‰ๅˆๅนถๆŠฅ่กจ ่ฐƒๆ•ดไน‹ๅ‰ๅˆๅนถๆŠฅ่กจๅŽŸๆ•ฐๆฎ) sheet_type (str, optional): ๆŠฅ่กจ็ฑปๅž‹๏ผŒ้ป˜่ฎคไธบ "income". fields (List, optional): ๅญ—ๆฎต๏ผŒ้ป˜่ฎคไธบ None๏ผŒ่ฟ”ๅ›žๆ‰€ๆœ‰ๅญ—ๆฎต. 
Returns: pd.DataFrame: ๆŒ‡ๅฎšๆกไปถ็š„ๆœฌๅœฐๆŠฅ่กจๆ•ฐๆฎ """ if (not start) and (not end) and (not report_date): raise ValueError( "[DATE ERROR]\t 'start', 'end' ไธŽ 'report_date' ไธ่ƒฝๅŒๆ—ถไธบ None") if isinstance(code, str): code = (code,) if not report_type: report_type = ("1", "2", "4", "5", "11") if isinstance(report_type, int) or isinstance(report_type, str): report_type = (str(report_type), ) else: report_type = list(map(str, report_type)) coll = eval(f"DATABASE.{sheet_type}") qry = {} if not report_date: if not end: end = datetime.date.today() start = pd.Timestamp(start) end = pd.Timestamp(end) start_date_stamp = QA_util_date_stamp(start) end_date_stamp = QA_util_date_stamp(end) if not code: qry = { "f_ann_date_stamp": { "$gte": start_date_stamp, "$lte": end_date_stamp }, "report_type": { "$in": report_type } } else: qry = { "code": { "$in": code }, "f_ann_date_stamp": { "$gte": start_date_stamp, "$lte": end_date_stamp }, "report_type": { "$in": report_type } } else: report_date_stamp = QA_util_date_stamp(report_date) if not code: qry = { "report_date_stamp": report_date_stamp, "report_type": { "$in": report_type } } else: qry = { "code": { "$in": code }, "report_date_stamp": report_date_stamp, "report_type": { "$in": report_type } } if isinstance(fields, str): fields = list( set([fields, "code", "ann_date", "report_date", "f_ann_date"])) elif fields: fields = list( set(list(fields) + ["code", "ann_date", "report_date", "f_ann_date"])) cursor = coll.find(qry, batch_size=10000).sort([ ("report_date_stamp", pymongo.ASCENDING), ("f_ann_date_stamp", pymongo.ASCENDING)]) if fields: df = pd.DataFrame(cursor).drop(columns="_id")[fields].set_index("code") df.report_date = pd.to_datetime(df.report_date, utc=False) df.ann_date = pd.to_datetime(df.ann_date, utc=False) df.f_ann_date = pd.to_datetime(df.f_ann_date, utc=False) else: df = pd.DataFrame(cursor).drop(columns="_id").set_index("code") df.report_date = pd.to_datetime(df.report_date, utc=False) df.ann_date = pd.to_datetime(df.ann_date, utc=False) df.f_ann_date = pd.to_datetime(df.f_ann_date, utc=False) return df def QA_fetch_last_financial( code: Union[str, List, Tuple] = None, cursor_date: Union[str, datetime.datetime, pd.Timestamp] = None, report_label: Union[int, str] = None, report_type: Union[int, str, List, Tuple] = None, sheet_type: str = "income", fields: Union[str, List, Tuple] = None) -> pd.DataFrame: """่Žทๅ–่ท็ฆปๆŒ‡ๅฎšๆ—ฅๆœŸ (cursor_date) ๆœ€่ฟ‘็š„ๅŽŸๅง‹ๆ•ฐๆฎ (ไธๅŒ…ๅซๅœจ cursor_date ๅ‘ๅธƒ็š„่ดขๅŠกๆ•ฐๆฎ)๏ผŒ ๅฝ“ๅŒๆ—ถ่พ“ๅ…ฅ cursor_date ไธŽ report_date ๆ—ถ๏ผŒไปฅ report_date ไฝœไธบๆŸฅ่ฏขๆ ‡ๅ‡† ๆณจๆ„๏ผš ่ฟ™้‡Œ็š„ report_type ไป…ๆ”ฏๆŒ (1,4, 5) ไธ‰็ง็ฑปๅž‹๏ผŒไปฅ้ฟๅ…ๆททๆท†ๅˆๅนถๆ•ฐๆฎๅ’Œๅ•ๅญฃๆ•ฐๆฎ็ญ‰ ่ฏดๆ˜Ž๏ผš ๆŸณๅทฅ (000528) ๅœจ 2018 ๅนด 8 ๆœˆ 30 ๆ—ฅๅ‘ๅธƒๅŠๅนดๆŠฅ๏ผŒไน‹ๅŽๅœจ 2018 ๅนด 9 ๆœˆ 29 ๆ—ฅๅ‘ๅธƒไฟฎๆญฃๆŠฅๅ‘Š๏ผŒ - ๅฆ‚ๆžœ่พ“ๅ…ฅ็š„ cursor_date ไธบ 2018-08-31, ้‚ฃไนˆ่Žทๅ–ๅˆฐ็š„ๅฐฑๆ˜ฏๅŽŸๅง‹ๅŠๅนดๆŠฅ๏ผŒๅฏนๅบ” report_type == 5 - ๅฆ‚ๆžœ่พ“ๅ…ฅ็š„ cursor_date ไธบ 2018-09-30๏ผŒ้‚ฃไนˆ่Žทๅ–ๅˆฐ็š„ๅฐฑๆ˜ฏๆœ€ๆ–ฐๅˆๅนถๆŠฅ่กจ๏ผŒๅฏนๅบ” report_type == 1 - ๅฆ‚ๆžœๅฏนๅบ”็š„ cursor_date ไธบ 2019-08-31๏ผŒ้œ€่ฆ่Žทๅ– 2018 ๅนดๅŠๅนดๆŠฅ๏ผŒ้‚ฃไนˆๅฐฑ่ฟ”ๅ›žๆŸณๅทฅๅœจ 2019 ๅนด 8 ๆœˆ 29 ๆ—ฅๅ‘ๅธƒ็š„ไธŠๅนดๅŒๆœŸๅŸบๅ‡†๏ผŒๅฏนๅบ” report_type == 4 Args: code (Union[str, List, Tuple], optional): ่‚ก็ฅจไปฃ็ ๆˆ–่‚ก็ฅจๅˆ—่กจ๏ผŒ้ป˜่ฎคไธบ None, ๆŸฅ่ฏขๆ‰€ๆœ‰่‚ก็ฅจ cursor_date (Union[str, datetime.datetime, pd.Timestamp]): ๆŸฅ่ฏขๆˆช้ขๆ—ฅๆœŸ (ไธ€่ˆฌๆŒ‡่ฐƒไป“ๆ—ฅ), ้ป˜่ฎคไธบ None report_label (Union[str, int], optional): 
            Report label. Reports here are classified as Q1, half-year, Q3 and annual reports.
            Defaults to None, i.e. whichever report label is closest to cursor_date.
        report_type (Union[str, List, Tuple], optional): Report type, defaults to None, i.e. whichever
            report is closest to cursor_date regardless of type, which avoids look-ahead bias.
            (1 consolidated statements, the latest statements published by the listed company (default) |
            2 single-quarter consolidated statements |
            4 adjusted consolidated statements, last year's comparatives republished this year, report period is the previous year |
            5 pre-adjustment consolidated statements, the original data kept before an adjustment)
        sheet_type (str, optional): Statement type, defaults to "income".
        fields (Union[str, List, Tuple], optional): Fields, defaults to None, i.e. return all fields.

    Returns:
        pd.DataFrame: Financial data matching the combined conditions
    """
    def _trans_financial_type(x):
        if x.empty:
            return x
        if sheet_type == "balancesheet":
            # Balance sheets are point-in-time snapshots, return them as they are
            return x
        else:
            if x.iloc[0].report_date[4:] in ['0331', '1231']:
                # For Q1 reports the single-quarter and cumulative consolidated figures are
                # identical, and for annual reports there is no single-quarter concept,
                # so the first row can be returned directly.
                return x.iloc[0]
            if x.iloc[0].report_type in ['1', '4', '5']:
                return x.iloc[0]
            if x.iloc[0].report_type == '2':
                # Try to find statements of the same report period with report_type '1' or '4'
                # try:
                #     if (x.shape[0] > 1) & (x.iloc[1].report_date == x.iloc[0].report_date) & (x.iloc[1].report_type in ['1', '4']):
                #         return x.iloc[1]
                # except:
                #     return pd.Series()
                # Otherwise stitch the cumulative figure together from the single-quarter data
                cursor_x = x.loc[x.report_date.map(str).str.slice(
                    0, 4) == x.iloc[0].report_date[:4]]
                cursor_x = cursor_x.drop_duplicates(
                    subset=['report_date'], keep='first')
                cursor_x = cursor_x.loc[cursor_x.report_date <= x.iloc[0].report_date]
                cursor_x = cursor_x.fillna(0)
                non_numeric_columns = sorted(["f_ann_date", "f_ann_date_stamp", "ann_date",
                                              "ann_date_stamp", "report_date", "report_date_stamp",
                                              "update_flag", "report_type", "code", "report_label"])
                columns = sorted(
                    list(set(cursor_x.columns) - set(non_numeric_columns)))
                rtn_se = cursor_x[columns].sum(axis=0)
                rtn_se = rtn_se.append(cursor_x[non_numeric_columns].iloc[0])
                return rtn_se

    if isinstance(code, str):
        code = (code,)
    if not report_type:
        report_type = ["1", "2", "4", "5"]
    else:
        if isinstance(report_type, int):
            report_type = str(report_type)
        if isinstance(report_type, str):
            if report_type not in ["1", "4", "5"]:
                raise ValueError("[REPORT_TYPE ERROR]")
            report_type = (report_type,)
        else:
            report_type = list(set(report_type) & {"1", "2", "4", "5"})
    if sheet_type not in SHEET_TYPE:
        raise ValueError("[SHEET_TYPE ERROR]")
    if report_label:
        report_label = str(report_label)
    if isinstance(fields, str):
        fields = list(
            set([fields, "code", "ann_date", "report_date", "f_ann_date", "report_type"]))
    elif fields:
        fields = list(
            set(list(fields) + ["code", "ann_date", "report_date", "f_ann_date", "report_type"]))
    coll = eval(f"DATABASE.{sheet_type}")
    if (not code) and (not report_label):
        # To keep the query fast we only look back a limited window from cursor_date.
        # When rebalancing, only the latest data available at that moment matters;
        # rebalancing is usually monthly or quarterly and the longest reporting cycle
        # is the annual report, while a correction published more than a quarter later
        # hardly ever affects the rebalance, so a one-year (400-day) look-back is used here.
        qry = {
            "f_ann_date_stamp": {
                "$gt": QA_util_date_stamp((pd.Timestamp(cursor_date) - pd.Timedelta(days=400)).strftime("%Y-%m-%d")),
                "$lt": QA_util_date_stamp(cursor_date)
            },
            "report_type": {
                "$in": report_type
            }}
        cursor = coll.find(qry, batch_size=10000).sort([
("report_date_stamp", pymongo.DESCENDING), ("f_ann_date_stamp", pymongo.DESCENDING)]) try: if not fields: df = pd.DataFrame(cursor).drop(columns="_id") else: df = pd.DataFrame(cursor).drop(columns="_id")[fields] except: raise ValueError("[QRY ERROR]") if sheet_type == "balancesheet": return df.groupby("code").apply(lambda x: x.iloc[0]) return df.groupby("code").apply(_trans_financial_type).unstack() if not report_label: qry = { "code": { "$in": code }, "f_ann_date_stamp": { "$gt": QA_util_date_stamp((pd.Timestamp(cursor_date) - pd.Timedelta(days=400)).strftime("%Y-%m-%d")), "$lt": QA_util_date_stamp(cursor_date) }, "report_type": {"$in": report_type}} cursor = coll.find(qry, batch_size=10000).sort([ ("report_date_stamp", pymongo.DESCENDING), ("f_ann_date_stamp", pymongo.DESCENDING)]) try: if not fields: df = pd.DataFrame(cursor).drop(columns="_id") else: df = pd.DataFrame(cursor).drop(columns="_id")[fields] except: raise ValueError("[QRY ERROR]") if sheet_type == "balancesheet": return df.groupby("code").apply(lambda x: x.iloc[0]) return df.groupby("code").apply(_trans_financial_type).unstack() if not code: qry = { "f_ann_date_stamp": { "$gt": QA_util_date_stamp((pd.Timestamp(cursor_date) - pd.Timedelta(days=400)).strftime("%Y-%m-%d")), "$lt": QA_util_date_stamp(cursor_date) }, "report_type": { "$in": report_type }, "report_label": report_label } cursor = coll.find(qry, batch_size=10000).sort([ ("report_date_stamp", pymongo.DESCENDING), ("f_ann_date_stamp", pymongo.DESCENDING)]) try: if not fields: df = pd.DataFrame(cursor).drop(columns="_id") else: df = pd.DataFrame(cursor).drop(columns="_id")[fields] except: raise ValueError("[QRY ERROR]") if sheet_type == "balancesheet": return df.groupby("code").apply(lambda x: x.iloc[0]) return df.groupby("code").apply(_trans_financial_type).unstack() else: qry = { "code": { "$in": code }, "f_ann_date_stamp": { "$gt": QA_util_date_stamp((pd.Timestamp(cursor_date) - pd.Timedelta(days=400)).strftime("%Y-%m-%d")), "$lt": QA_util_date_stamp(cursor_date) }, "report_type": { "$in": report_type }, "report_label": report_label } cursor = coll.find(qry, batch_size=10000).sort([ ("report_date_stamp", pymongo.DESCENDING), ("f_ann_date_stamp", pymongo.DESCENDING)]) try: if not fields: df = pd.DataFrame(cursor).drop(columns="_id") else: df = pd.DataFrame(cursor).drop(columns="_id")[fields] except: raise ValueError("[QRY ERROR]") # df.report_date = pd.to_datetime(df.report_date, utc=False) # df.ann_date = pd.to_datetime(df.ann_date, utc=False) # df.f_ann_date = pd.to_datetime(df.f_ann_date, utc=False) if sheet_type == "balancesheet": return df.groupby("code").apply(lambda x: x.iloc[0]) return df.groupby("code").apply(_trans_financial_type).unstack() def QA_fetch_stock_basic( code: Union[str, List, Tuple] = None, status: Union[str, List, Tuple] = 'L') -> pd.DataFrame: """่Žทๅ–่‚ก็ฅจๅŸบๆœฌไฟกๆฏ Args: code (Union[str, List, Tuple], optional): ่‚ก็ฅจไปฃ็ ๆˆ–ๅˆ—่กจ๏ผŒ้ป˜่ฎคไธบ None๏ผŒ่Žทๅ–ๅ…จ้ƒจ่‚ก็ฅจ status (Union[str, List, Tuple], optional): ่‚ก็ฅจ็Šถๆ€, ้ป˜่ฎคไธบ 'L', ๅณไปๅœจไธŠๅธ‚็š„่‚ก็ฅจ๏ผŒๅฆ‚ๆžœไธบ None๏ผŒ ๅˆ™่ฟ”ๅ›žๆ‰€ๆœ‰็Šถๆ€่‚ก็ฅจ Returns: pd.DataFrame: ่‚ก็ฅจๅŸบๆœฌไฟกๆฏ """ coll = DATABASE.stock_basic if isinstance(code, str): code = (code,) if isinstance(status, str): status = (status,) qry = {} if not status: if not code: qry = {} else: qry = { "code": { "$in": code } } else: if not code: qry = { "status": { "$in": status } } else: qry = { "code": { "$in": code }, "status": { "$in": status } } cursor = coll.find(qry) res = pd.DataFrame(cursor) if 
res.empty: return res else: res.list_date = pd.to_datetime(res.list_date, utc=False) return res.drop(columns="_id").set_index("code") def QA_fetch_stock_name( code: Union[str, List, Tuple] = None, cursor_date: Union[str, datetime.datetime, pd.Timestamp] = None ) -> pd.DataFrame: """่Žทๅ–่‚ก็ฅจๅކๅฒๆ›พ็”จๅ Args: code (Union[str, List, Tuple], optional): ่‚ก็ฅจไปฃ็ ๆˆ–ๅˆ—่กจ๏ผŒ้ป˜่ฎคไธบ None๏ผŒๆŸฅ่ฏขๆ‰€ๆœ‰่‚ก็ฅจ. cursor (Union[str, datetime.datetime, pd.Timestamp], optional): ๆˆชๆญขๆ—ถ้—ด๏ผŒ่‚ก็ฅจๅ็งฐ่ท็ฆป cursor_date ๆœ€่ฟ‘็š„ๅๅญ— Returns: pd.DataFrame: ่‚ก็ฅจๅކๅฒๆ›พ็”จๅ """ coll = DATABASE.namechange if isinstance(code, str): code = [code] qry = {} if not code: if not cursor_date: qry = {} else: qry = { "start_date_stamp": { "$lte": QA_util_date_stamp(cursor_date) }, "end_date_stamp": { "$gte": QA_util_date_stamp(cursor_date) } } else: if not cursor_date: qry = { "code": { "$in": code } } else: qry = { "code": { "$in": code }, "start_date_stamp": { "$lte": QA_util_date_stamp(cursor_date) }, "end_date_stamp": { "$gte": QA_util_date_stamp(cursor_date) } } cursor = coll.find(qry) res = pd.DataFrame(cursor) if res.empty: return res else: res.start_date = pd.to_datetime(res.start_date, utc=False) res.end_date = pd.to_datetime(res.end_date, utc=False) return res.drop(columns="_id").set_index("code").sort_values(by="start_date_stamp").drop_duplicates(keep="last").sort_index() def QA_fetch_industry_adv( code: Union[str, List, Tuple] = None, cursor_date: Union[str, datetime.datetime] = None, start: Union[str, datetime.datetime] = None, end: Union[str, datetime.datetime] = None, levels: Union[str, List, Tuple] = None, src: str = "sw" ) -> pd.DataFrame: """ๆœฌๅœฐ่Žทๅ–ๆŒ‡ๅฎš่‚ก็ฅจๆˆ–่‚ก็ฅจๅˆ—่กจ็š„่กŒไธš Args: code (Union[str, List, Tuple], optional): ่‚ก็ฅจไปฃ็ ๆˆ–ๅˆ—่กจ๏ผŒ้ป˜่ฎคไธบ None, ๆŸฅ่ฏขๆ‰€ๆœ‰่‚ก็ฅจไปฃ็ . cursor_date (Union[str, datetime.datetime], optional): ไธ€่ˆฌๆŒ‡่ฐƒไป“ๆ—ฅ๏ผŒๆญคๆ—ถไธ้œ€่ฆๅ†่ฎพ็ฝฎ start ไธŽ end start(Union[str, datetime.datetime], optional): ่ตทๅง‹ๆ—ถ้—ด๏ผŒ้ป˜่ฎคไธบ None. end(Union[str, datetime.datetime], optional): ๆˆชๆญขๆ—ถ้—ด, ้ป˜่ฎคไธบ None. levels (Union[str, List, Tuple], optional): [description]. ๅฏนๅบ”่กŒไธšๅˆ†็บง็บงๅˆซ๏ผŒ้ป˜่ฎคไธบ None๏ผŒๆŸฅ่ฏขๆ‰€ๆœ‰่กŒไธšๅˆ†็บงๆ•ฐๆฎ src (str, optional): ๅˆ†็บงๆฅๆบ๏ผŒ้ป˜่ฎคไธบ "sw"(็›ฎๅ‰ไป…ๆ”ฏๆŒ็”ณไธ‡่กŒไธšๅˆ†็ฑป). 
    Returns:
        pd.DataFrame: Industry classification information
    """
    coll = DATABASE.industry
    if not code:
        code = QA_fetch_stock_list().index.tolist()
    if isinstance(code, str):
        code = [code]
    if isinstance(levels, str):
        levels = [levels, ]
    if not levels:
        levels = ["l1", "l2", "l3"]
    levels = list(map(lambda x: x.lower(), levels))
    df_tmp = pd.DataFrame()
    if not cursor_date:
        if not start:
            qry = {
                "code": {
                    "$in": code
                },
                "level": {
                    "$in": levels
                },
                "src": src.lower()
            }
        else:
            qry = {
                "code": {
                    "$in": code
                },
                "level": {
                    "$in": levels
                },
                "src": src.lower(),
                "in_date_stamp": {
                    "$lte": QA_util_date_stamp(pd.Timestamp(start).strftime("%Y-%m-%d"))
                }
            }
        if coll.count_documents(filter=qry) < 1:
            print("No matching industry data found")
            return pd.DataFrame()
        cursor = coll.find(qry)
        df_tmp = pd.DataFrame(cursor).drop(columns="_id")
        if end:
            df_tmp = df_tmp.loc[df_tmp.out_date_stamp > QA_util_date_stamp(
                pd.Timestamp(end).strftime("%Y-%m-%d"))]
    else:
        qry = {
            "code": {
                "$in": code
            },
            "level": {
                "$in": levels
            },
            "src": src.lower(),
            "in_date_stamp": {
                "$lte": QA_util_date_stamp(pd.Timestamp(cursor_date).strftime("%Y-%m-%d"))
            }
        }
        if coll.count_documents(filter=qry) < 1:
            print("No matching industry data found")
            return pd.DataFrame()
        cursor = coll.find(qry)
        df_tmp = pd.DataFrame(cursor).drop(columns="_id")
        # keep only classifications still valid at cursor_date
        df_tmp = df_tmp.loc[df_tmp.out_date_stamp > QA_util_date_stamp(
            pd.Timestamp(cursor_date).strftime("%Y-%m-%d"))]
    df_tmp.in_date = pd.to_datetime(df_tmp.in_date, utc=False)
    df_tmp.out_date = pd.to_datetime(df_tmp.out_date, utc=False)
    return df_tmp.drop(columns=["in_date_stamp", "out_date_stamp"])


def QA_fetch_daily_basic(
        code: Union[str, List, Tuple] = None,
        start: Union[str, pd.Timestamp, datetime.datetime] = None,
        end: Union[str, pd.Timestamp, datetime.datetime] = None,
        cursor_date: Union[str, pd.Timestamp, datetime.datetime] = None,
        fields: Union[str, Tuple, List] = None
) -> pd.DataFrame:
    """Fetch the key daily fundamental indicators of all stocks, useful for stock
    screening, report display and similar tasks.

    Args:
        code (Union[str, List, Tuple], optional): Stock code or list of codes. Defaults to None, i.e. the whole market.
        start (Union[str, pd.Timestamp, datetime.datetime], optional): Start date, defaults to None.
        end (Union[str, pd.Timestamp, datetime.datetime], optional): End date, defaults to None.
        cursor_date (Union[str, pd.Timestamp, datetime.datetime], optional): A single date; mutually exclusive
            with start/end, so use either cursor_date or start and end.
        fields (Union[str, Tuple, List], optional): Fields to return.

    Returns:
        pd.DataFrame: Fundamental indicators indexed by a (date, code) MultiIndex.
    """
    if isinstance(code, str):
        code = (code,)
    if not code:
        if (not start) and (not cursor_date):
            raise ValueError(
                "[ERROR]\tstart and end and cursor_date cannot all be none!")
        if not cursor_date:
            if not end:
                end_stamp = QA_util_date_stamp(datetime.date.today())
            else:
                end_stamp = QA_util_date_stamp(end)
            start_stamp = QA_util_date_stamp(start)
            qry = {
                "trade_date_stamp": {
                    "$gte": start_stamp,
                    "$lte": end_stamp
                }
            }
        else:
            real_trade_date = QA_util_get_real_date(cursor_date)
            trade_date_stamp = QA_util_date_stamp(real_trade_date)
            qry = {
                "trade_date_stamp": trade_date_stamp
            }
    else:
        if (not start) and (not cursor_date):
            raise ValueError(
                "[ERROR]\tstart and end and cursor_date cannot all be none!")
        if not cursor_date:
            if not end:
                end_stamp = QA_util_date_stamp(datetime.date.today())
            else:
                end_stamp = QA_util_date_stamp(end)
            start_stamp = QA_util_date_stamp(start)
            qry = {
                "code": {
                    "$in": code
                },
                "trade_date_stamp": {
                    "$gte": start_stamp,
                    "$lte": end_stamp
                }
            }
        else:
            real_trade_date =
QA_util_get_real_date(cursor_date) trade_date_stamp = QA_util_date_stamp(real_trade_date) qry = { "code": { "$in": code }, "trade_date_stamp": trade_date_stamp } coll = DATABASE.daily_basic cursor = coll.find(qry) df = pd.DataFrame(cursor) if df.empty: return df df = df.rename(columns={"trade_date": "date"}).drop( columns="_id") df.date = pd.to_datetime(df.date, utc=False) df = df.set_index(["date", "code"]).sort_index() if not fields: return df return df[fields] if __name__ == "__main__": # print(QA_fetch_get_individual_financial( # "000001", "2020-01-01", "2020-12-31")) # print(QA_fetch_get_individual_financial( # "000001", report_date="2020-03-31", fields="basic_eps")) # print(QA_fetch_get_crosssection_financial('2020-03-31')) # print(QA_fetch_crosssection_financial("2020-03-31", fields="basic_eps")) # df = QA_fetch_financial_adv(start="2018-06-30", end="2018-09-30") # print(df.loc['000528', ["report_date", "f_ann_date", # "ann_date", "basic_eps", "report_type", "update_flag", "report_label"]]) # print(df) # print(QA_fetch_stock_basic(status="D")) # ๆœ€่ฟ‘่ดขๅŠกๆ•ฐๆฎ่Žทๅ–ๆต‹่ฏ• # print(QA_fetch_last_financial( # code="000596", cursor_date="2020-10-08")) # print(QA_fetch_last_financial( # code=QA_fetch_stock_list().index.tolist(), cursor_date="2020-10-08")) # print(QA_fetch_last_financial( # code = '000001', cursor_date = '2020-10-08' # )) code = QA_fetch_stock_list().index.tolist() cursor_date = '2020-10-08' df_origin = QA_fetch_last_financial( code=code, cursor_date=cursor_date, sheet_type="balancesheet") # print(QA_fetch_last_financial( # cursor_date="2018-08-31")) # print(QA_fetch_last_financial( # cursor_date="2018-08-31", code=["000528"], fields=["report_date", "ann_date", "f_ann_date", "update_flag"])) # print(QA_fetch_financial_adv( # cursor_date="2018-08-31")) # ่‚ก็ฅจๅŸบๆœฌไฟกๆฏ่Žทๅ–ๆต‹่ฏ• # print(QA_fetch_stock_basic("000001")) # print(QA_fetch_stock_basic(status=["P", "D"])) # ่กŒไธš่Žทๅ–ๆต‹่ฏ• # print(QA_fetch_industry_adv(start="1998-01-01", end="2020-12-02").head()) # print(QA_fetch_industry_adv(["000001", "600000"], # start="1998-01-01", end="2020-12-02")) # print(QA_fetch_industry_adv( # ["000001", "600000"], cursor_date="2020-12-02")) # print(QA_fetch_stock_name( # code=['000001', '000002'], cursor_date="20081009")) # print(QA_fetch_daily_basic(cursor_date="2018-01-01"))
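    # --- Hedged usage sketch (added for illustration; not part of the original test battery) ---
    # How these fetchers are typically combined on a rebalance date: pull the
    # daily basic indicators for a cursor date and join them with the latest
    # income-statement figures available at that date.  This assumes the local
    # MongoDB collections (daily_basic, income, ...) have already been populated,
    # and the column names "pe", "pb" and "total_revenue" are the usual tushare
    # field names -- adjust them if your local schema differs.
    #
    # cursor_date = "2020-10-08"
    # basics = QA_fetch_daily_basic(cursor_date=cursor_date, fields=["pe", "pb"])
    # latest_income = QA_fetch_last_financial(
    #     cursor_date=cursor_date, fields=["total_revenue"])
    # snapshot = basics.reset_index().merge(
    #     latest_income.reset_index(), on="code", how="left")
    # print(snapshot.head())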
the-stack_106_30880
#!/usr/bin/env python # # Copyright (c) 2009-2013, Luke Maurits <[email protected]> # All rights reserved. # With contributions from: # * Chris Clark # * Klein Stephane # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from __future__ import print_function from builtins import object, range __version__ = "trunk" import copy import csv import random import re import sys import textwrap import itertools import unicodedata from evennia.utils.ansi import parse_ansi py3k = sys.version_info[0] >= 3 if py3k: unicode = str basestring = str itermap = map iterzip = zip uni_chr = chr from html.parser import HTMLParser else: itermap = itertools.imap iterzip = itertools.izip uni_chr = unichr from HTMLParser import HTMLParser if py3k and sys.version_info[1] >= 2: from html import escape else: from cgi import escape # hrule styles FRAME = 0 ALL = 1 NONE = 2 HEADER = 3 # Table styles DEFAULT = 10 MSWORD_FRIENDLY = 11 PLAIN_COLUMNS = 12 RANDOM = 20 _re = re.compile("\033\[[0-9;]*m") def _ansi(method): "decorator for converting ansi in input" def wrapper(self, *args, **kwargs): def convert(inp): if isinstance(inp, basestring): return parse_ansi("{n%s{n" % inp) elif hasattr(inp, '__iter__'): li = [] for element in inp: if isinstance(element, basestring): li.append(convert(element)) elif hasattr(element, '__iter__'): li.append(convert(element)) else: li.append(element) return li return inp args = [convert(arg) for arg in args] #kwargs = dict((key, convert(val)) for key, val in kwargs.items()) return method(self, *args, **kwargs) return wrapper def _get_size(text): lines = text.split("\n") height = len(lines) width = max([_str_block_width(line) for line in lines]) return (width, height) class PrettyTable(object): @_ansi def __init__(self, field_names=None, **kwargs): """Return a new PrettyTable instance Arguments: encoding - Unicode encoding scheme used to decode any encoded input field_names - list or tuple of field names fields - list or tuple of field names to include in displays start - index of first data row to include in output end - index of last data row to include in output PLUS ONE (list slice style) header - print a header showing field names (True or 
False) header_style - stylisation to apply to field names in header ("cap", "title", "upper", "lower" or None) border - print a border around the table (True or False) hrules - controls printing of horizontal rules after rows. Allowed values: FRAME, HEADER, ALL, NONE vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE int_format - controls formatting of integer data float_format - controls formatting of floating point data padding_width - number of spaces on either side of column data (only used if left and right paddings are None) left_padding_width - number of spaces on left hand side of column data right_padding_width - number of spaces on right hand side of column data vertical_char - single character string used to draw vertical lines horizontal_char - single character string used to draw horizontal lines junction_char - single character string used to draw line junctions sortby - name of field to sort rows by sort_key - sorting key function, applied to data points before sorting valign - default valign for each row (None, "t", "m" or "b") reversesort - True or False to sort in descending or ascending order""" self.encoding = kwargs.get("encoding", "UTF-8") # Data self._field_names = [] self._align = {} self._valign = {} self._max_width = {} self._rows = [] if field_names: self.field_names = field_names else: self._widths = [] # Options self._options = "start end fields header border sortby reversesort sort_key attributes format hrules vrules".split() self._options.extend("int_format float_format padding_width left_padding_width right_padding_width".split()) self._options.extend("vertical_char horizontal_char junction_char header_style valign xhtml print_empty".split()) for option in self._options: if option in kwargs: self._validate_option(option, kwargs[option]) else: kwargs[option] = None self._start = kwargs["start"] or 0 self._end = kwargs["end"] or None self._fields = kwargs["fields"] or None if kwargs["header"] in (True, False): self._header = kwargs["header"] else: self._header = True self._header_style = kwargs["header_style"] or None if kwargs["border"] in (True, False): self._border = kwargs["border"] else: self._border = True self._hrules = kwargs["hrules"] or FRAME self._vrules = kwargs["vrules"] or ALL self._sortby = kwargs["sortby"] or None if kwargs["reversesort"] in (True, False): self._reversesort = kwargs["reversesort"] else: self._reversesort = False self._sort_key = kwargs["sort_key"] or (lambda x: x) self._int_format = kwargs["int_format"] or {} self._float_format = kwargs["float_format"] or {} self._padding_width = kwargs["padding_width"] or 1 self._left_padding_width = kwargs["left_padding_width"] or None self._right_padding_width = kwargs["right_padding_width"] or None self._vertical_char = kwargs["vertical_char"] or self._unicode("|") self._horizontal_char = kwargs["horizontal_char"] or self._unicode("-") self._junction_char = kwargs["junction_char"] or self._unicode("+") if kwargs["print_empty"] in (True, False): self._print_empty = kwargs["print_empty"] else: self._print_empty = True self._format = kwargs["format"] or False self._xhtml = kwargs["xhtml"] or False self._attributes = kwargs["attributes"] or {} def _unicode(self, value): if not isinstance(value, basestring): value = str(value) if not isinstance(value, unicode): value = unicode(value, self.encoding, "strict") return value def _justify(self, text, width, align): excess = width - _str_block_width(text) if align == "l": return text + excess * " " elif 
align == "r": return excess * " " + text else: if excess % 2: # Uneven padding # Put more space on right if text is of odd length... if _str_block_width(text) % 2: return (excess//2)*" " + text + (excess//2 + 1)*" " # and more space on left if text is of even length else: return (excess//2 + 1)*" " + text + (excess//2)*" " # Why distribute extra space this way? To match the behaviour of # the inbuilt str.center() method. else: # Equal padding on either side return (excess//2)*" " + text + (excess//2)*" " def __getattr__(self, name): if name == "rowcount": return len(self._rows) elif name == "colcount": if self._field_names: return len(self._field_names) elif self._rows: return len(self._rows[0]) else: return 0 else: raise AttributeError(name) def __getitem__(self, index): new = PrettyTable() new.field_names = self.field_names for attr in self._options: setattr(new, "_"+attr, getattr(self, "_"+attr)) setattr(new, "_align", getattr(self, "_align")) if isinstance(index, slice): for row in self._rows[index]: new.add_row(row) elif isinstance(index, int): new.add_row(self._rows[index]) else: raise Exception("Index %s is invalid, must be an integer or slice" % str(index)) return new if py3k: def __str__(self): return self.__unicode__() else: def __str__(self): return self.__unicode__().encode(self.encoding) def __unicode__(self): return self.get_string() ############################## # ATTRIBUTE VALIDATORS # ############################## # The method _validate_option is all that should be used elsewhere in the code base to validate options. # It will call the appropriate validation method for that option. The individual validation methods should # never need to be called directly (although nothing bad will happen if they *are*). # Validation happens in TWO places. # Firstly, in the property setters defined in the ATTRIBUTE MANAGMENT section. # Secondly, in the _get_options method, where keyword arguments are mixed with persistent settings def _validate_option(self, option, val): if option in ("field_names"): self._validate_field_names(val) elif option in ("start", "end", "max_width", "padding_width", "left_padding_width", "right_padding_width", "format"): self._validate_nonnegative_int(option, val) elif option in ("sortby"): self._validate_field_name(option, val) elif option in ("sort_key"): self._validate_function(option, val) elif option in ("hrules"): self._validate_hrules(option, val) elif option in ("vrules"): self._validate_vrules(option, val) elif option in ("fields"): self._validate_all_field_names(option, val) elif option in ("header", "border", "reversesort", "xhtml", "print_empty"): self._validate_true_or_false(option, val) elif option in ("header_style"): self._validate_header_style(val) elif option in ("int_format"): self._validate_int_format(option, val) elif option in ("float_format"): self._validate_float_format(option, val) elif option in ("vertical_char", "horizontal_char", "junction_char"): self._validate_single_char(option, val) elif option in ("attributes"): self._validate_attributes(option, val) else: raise Exception("Unrecognised option: %s!" 
% option) def _validate_field_names(self, val): # Check for appropriate length if self._field_names: try: assert len(val) == len(self._field_names) except AssertionError: raise Exception("Field name list has incorrect number of values, (actual) %d!=%d (expected)" % (len(val), len(self._field_names))) if self._rows: try: assert len(val) == len(self._rows[0]) except AssertionError: raise Exception("Field name list has incorrect number of values, (actual) %d!=%d (expected)" % (len(val), len(self._rows[0]))) # Check for uniqueness try: assert len(val) == len(set(val)) except AssertionError: raise Exception("Field names must be unique!") def _validate_header_style(self, val): try: assert val in ("cap", "title", "upper", "lower", None) except AssertionError: raise Exception("Invalid header style, use cap, title, upper, lower or None!") def _validate_align(self, val): try: assert val in ["l","c","r"] except AssertionError: raise Exception("Alignment %s is invalid, use l, c or r!" % val) def _validate_valign(self, val): try: assert val in ["t","m","b",None] except AssertionError: raise Exception("Alignment %s is invalid, use t, m, b or None!" % val) def _validate_nonnegative_int(self, name, val): try: assert int(val) >= 0 except AssertionError: raise Exception("Invalid value for %s: %s!" % (name, self._unicode(val))) def _validate_true_or_false(self, name, val): try: assert val in (True, False) except AssertionError: raise Exception("Invalid value for %s! Must be True or False." % name) def _validate_int_format(self, name, val): if val == "": return try: assert type(val) in (str, unicode) assert val.isdigit() except AssertionError: raise Exception("Invalid value for %s! Must be an integer format string." % name) def _validate_float_format(self, name, val): if val == "": return try: assert type(val) in (str, unicode) assert "." in val bits = val.split(".") assert len(bits) <= 2 assert bits[0] == "" or bits[0].isdigit() assert bits[1] == "" or bits[1].isdigit() except AssertionError: raise Exception("Invalid value for %s! Must be a float format string." % name) def _validate_function(self, name, val): try: assert hasattr(val, "__call__") except AssertionError: raise Exception("Invalid value for %s! Must be a function." % name) def _validate_hrules(self, name, val): try: assert val in (ALL, FRAME, HEADER, NONE) except AssertionError: raise Exception("Invalid value for %s! Must be ALL, FRAME, HEADER or NONE." % name) def _validate_vrules(self, name, val): try: assert val in (ALL, FRAME, NONE) except AssertionError: raise Exception("Invalid value for %s! Must be ALL, FRAME, or NONE." % name) def _validate_field_name(self, name, val): try: assert (val in self._field_names) or (val is None) except AssertionError: raise Exception("Invalid field name: %s!" % val) def _validate_all_field_names(self, name, val): try: for x in val: self._validate_field_name(name, x) except AssertionError: raise Exception("fields must be a sequence of field names!") def _validate_single_char(self, name, val): try: assert _str_block_width(val) == 1 except AssertionError: raise Exception("Invalid value for %s! Must be a string of length 1." 
% name) def _validate_attributes(self, name, val): try: assert isinstance(val, dict) except AssertionError: raise Exception("attributes must be a dictionary of name/value pairs!") ############################## # ATTRIBUTE MANAGEMENT # ############################## def _get_field_names(self): return self._field_names """The names of the fields Arguments: fields - list or tuple of field names""" def _set_field_names(self, val): val = [self._unicode(x) for x in val] self._validate_option("field_names", val) if self._field_names: old_names = self._field_names[:] self._field_names = val if self._align and old_names: for old_name, new_name in zip(old_names, val): self._align[new_name] = self._align[old_name] for old_name in old_names: if old_name not in self._align: self._align.pop(old_name) else: for field in self._field_names: self._align[field] = "l" if self._valign and old_names: for old_name, new_name in zip(old_names, val): self._valign[new_name] = self._valign[old_name] for old_name in old_names: if old_name not in self._valign: self._valign.pop(old_name) else: for field in self._field_names: self._valign[field] = "t" field_names = property(_get_field_names, _set_field_names) def _get_align(self): return self._align def _set_align(self, val): self._validate_align(val) for field in self._field_names: self._align[field] = val align = property(_get_align, _set_align) def _get_valign(self): return self._valign def _set_valign(self, val): self._validate_valign(val) for field in self._field_names: self._valign[field] = val valign = property(_get_valign, _set_valign) def _get_max_width(self): return self._max_width def _set_max_width(self, val): self._validate_option("max_width", val) for field in self._field_names: self._max_width[field] = val max_width = property(_get_max_width, _set_max_width) def _get_fields(self): """List or tuple of field names to include in displays Arguments: fields - list or tuple of field names to include in displays""" return self._fields def _set_fields(self, val): self._validate_option("fields", val) self._fields = val fields = property(_get_fields, _set_fields) def _get_start(self): """Start index of the range of rows to print Arguments: start - index of first data row to include in output""" return self._start def _set_start(self, val): self._validate_option("start", val) self._start = val start = property(_get_start, _set_start) def _get_end(self): """End index of the range of rows to print Arguments: end - index of last data row to include in output PLUS ONE (list slice style)""" return self._end def _set_end(self, val): self._validate_option("end", val) self._end = val end = property(_get_end, _set_end) def _get_sortby(self): """Name of field by which to sort rows Arguments: sortby - field name to sort by""" return self._sortby def _set_sortby(self, val): self._validate_option("sortby", val) self._sortby = val sortby = property(_get_sortby, _set_sortby) def _get_reversesort(self): """Controls direction of sorting (ascending vs descending) Arguments: reveresort - set to True to sort by descending order, or False to sort by ascending order""" return self._reversesort def _set_reversesort(self, val): self._validate_option("reversesort", val) self._reversesort = val reversesort = property(_get_reversesort, _set_reversesort) def _get_sort_key(self): """Sorting key function, applied to data points before sorting Arguments: sort_key - a function which takes one argument and returns something to be sorted""" return self._sort_key def _set_sort_key(self, val): 
self._validate_option("sort_key", val) self._sort_key = val sort_key = property(_get_sort_key, _set_sort_key) def _get_header(self): """Controls printing of table header with field names Arguments: header - print a header showing field names (True or False)""" return self._header def _set_header(self, val): self._validate_option("header", val) self._header = val header = property(_get_header, _set_header) def _get_header_style(self): """Controls stylisation applied to field names in header Arguments: header_style - stylisation to apply to field names in header ("cap", "title", "upper", "lower" or None)""" return self._header_style def _set_header_style(self, val): self._validate_header_style(val) self._header_style = val header_style = property(_get_header_style, _set_header_style) def _get_border(self): """Controls printing of border around table Arguments: border - print a border around the table (True or False)""" return self._border def _set_border(self, val): self._validate_option("border", val) self._border = val border = property(_get_border, _set_border) def _get_hrules(self): """Controls printing of horizontal rules after rows Arguments: hrules - horizontal rules style. Allowed values: FRAME, ALL, HEADER, NONE""" return self._hrules def _set_hrules(self, val): self._validate_option("hrules", val) self._hrules = val hrules = property(_get_hrules, _set_hrules) def _get_vrules(self): """Controls printing of vertical rules between columns Arguments: vrules - vertical rules style. Allowed values: FRAME, ALL, NONE""" return self._vrules def _set_vrules(self, val): self._validate_option("vrules", val) self._vrules = val vrules = property(_get_vrules, _set_vrules) def _get_int_format(self): """Controls formatting of integer data Arguments: int_format - integer format string""" return self._int_format def _set_int_format(self, val): # self._validate_option("int_format", val) for field in self._field_names: self._int_format[field] = val int_format = property(_get_int_format, _set_int_format) def _get_float_format(self): """Controls formatting of floating point data Arguments: float_format - floating point format string""" return self._float_format def _set_float_format(self, val): # self._validate_option("float_format", val) for field in self._field_names: self._float_format[field] = val float_format = property(_get_float_format, _set_float_format) def _get_padding_width(self): """The number of empty spaces between a column's edge and its content Arguments: padding_width - number of spaces, must be a positive integer""" return self._padding_width def _set_padding_width(self, val): self._validate_option("padding_width", val) self._padding_width = val padding_width = property(_get_padding_width, _set_padding_width) def _get_left_padding_width(self): """The number of empty spaces between a column's left edge and its content Arguments: left_padding - number of spaces, must be a positive integer""" return self._left_padding_width def _set_left_padding_width(self, val): self._validate_option("left_padding_width", val) self._left_padding_width = val left_padding_width = property(_get_left_padding_width, _set_left_padding_width) def _get_right_padding_width(self): """The number of empty spaces between a column's right edge and its content Arguments: right_padding - number of spaces, must be a positive integer""" return self._right_padding_width def _set_right_padding_width(self, val): self._validate_option("right_padding_width", val) self._right_padding_width = val right_padding_width = 
property(_get_right_padding_width, _set_right_padding_width) def _get_vertical_char(self): """The charcter used when printing table borders to draw vertical lines Arguments: vertical_char - single character string used to draw vertical lines""" return self._vertical_char def _set_vertical_char(self, val): val = self._unicode(val) self._validate_option("vertical_char", val) self._vertical_char = val vertical_char = property(_get_vertical_char, _set_vertical_char) def _get_horizontal_char(self): """The charcter used when printing table borders to draw horizontal lines Arguments: horizontal_char - single character string used to draw horizontal lines""" return self._horizontal_char def _set_horizontal_char(self, val): val = self._unicode(val) self._validate_option("horizontal_char", val) self._horizontal_char = val horizontal_char = property(_get_horizontal_char, _set_horizontal_char) def _get_junction_char(self): """The charcter used when printing table borders to draw line junctions Arguments: junction_char - single character string used to draw line junctions""" return self._junction_char def _set_junction_char(self, val): val = self._unicode(val) self._validate_option("vertical_char", val) self._junction_char = val junction_char = property(_get_junction_char, _set_junction_char) def _get_format(self): """Controls whether or not HTML tables are formatted to match styling options Arguments: format - True or False""" return self._format def _set_format(self, val): self._validate_option("format", val) self._format = val format = property(_get_format, _set_format) def _get_print_empty(self): """Controls whether or not empty tables produce a header and frame or just an empty string Arguments: print_empty - True or False""" return self._print_empty def _set_print_empty(self, val): self._validate_option("print_empty", val) self._print_empty = val print_empty = property(_get_print_empty, _set_print_empty) def _get_attributes(self): """A dictionary of HTML attribute name/value pairs to be included in the <table> tag when printing HTML Arguments: attributes - dictionary of attributes""" return self._attributes def _set_attributes(self, val): self._validate_option("attributes", val) self._attributes = val attributes = property(_get_attributes, _set_attributes) ############################## # OPTION MIXER # ############################## def _get_options(self, kwargs): options = {} for option in self._options: if option in kwargs: self._validate_option(option, kwargs[option]) options[option] = kwargs[option] else: options[option] = getattr(self, "_"+option) return options ############################## # PRESET STYLE LOGIC # ############################## def set_style(self, style): if style == DEFAULT: self._set_default_style() elif style == MSWORD_FRIENDLY: self._set_msword_style() elif style == PLAIN_COLUMNS: self._set_columns_style() elif style == RANDOM: self._set_random_style() else: raise Exception("Invalid pre-set style!") def _set_default_style(self): self.header = True self.border = True self._hrules = FRAME self._vrules = ALL self.padding_width = 1 self.left_padding_width = 1 self.right_padding_width = 1 self.vertical_char = "|" self.horizontal_char = "-" self.junction_char = "+" def _set_msword_style(self): self.header = True self.border = True self._hrules = NONE self.padding_width = 1 self.left_padding_width = 1 self.right_padding_width = 1 self.vertical_char = "|" def _set_columns_style(self): self.header = True self.border = False self.padding_width = 1 self.left_padding_width = 0 
self.right_padding_width = 8 def _set_random_style(self): # Just for fun! self.header = random.choice((True, False)) self.border = random.choice((True, False)) self._hrules = random.choice((ALL, FRAME, HEADER, NONE)) self._vrules = random.choice((ALL, FRAME, NONE)) self.left_padding_width = random.randint(0,5) self.right_padding_width = random.randint(0,5) self.vertical_char = random.choice("~!@#$%^&*()_+|-=\{}[];':\",./;<>?") self.horizontal_char = random.choice("~!@#$%^&*()_+|-=\{}[];':\",./;<>?") self.junction_char = random.choice("~!@#$%^&*()_+|-=\{}[];':\",./;<>?") ############################## # DATA INPUT METHODS # ############################## @_ansi def add_row(self, row): """Add a row to the table Arguments: row - row of data, should be a list with as many elements as the table has fields""" if self._field_names and len(row) != len(self._field_names): raise Exception("Row has incorrect number of values, (actual) %d!=%d (expected)" %(len(row),len(self._field_names))) if not self._field_names: self.field_names = [("Field %d" % (n+1)) for n in range(0,len(row))] self._rows.append(list(row)) def del_row(self, row_index): """Delete a row to the table Arguments: row_index - The index of the row you want to delete. Indexing starts at 0.""" if row_index > len(self._rows)-1: raise Exception("Cant delete row at index %d, table only has %d rows!" % (row_index, len(self._rows))) del self._rows[row_index] @_ansi def add_column(self, fieldname, column, align="l", valign="t"): """Add a column to the table. Arguments: fieldname - name of the field to contain the new column of data column - column of data, should be a list with as many elements as the table has rows align - desired alignment for this column - "l" for left, "c" for centre and "r" for right valign - desired vertical alignment for new columns - "t" for top, "m" for middle and "b" for bottom""" if len(self._rows) in (0, len(column)): self._validate_align(align) self._validate_valign(valign) self._field_names.append(fieldname) self._align[fieldname] = align self._valign[fieldname] = valign for i in range(0, len(column)): if len(self._rows) < i+1: self._rows.append([]) self._rows[i].append(column[i]) else: raise Exception("Column length %d does not match number of rows %d!" 
% (len(column), len(self._rows))) def clear_rows(self): """Delete all rows from the table but keep the current field names""" self._rows = [] def clear(self): """Delete all rows and field names from the table, maintaining nothing but styling options""" self._rows = [] self._field_names = [] self._widths = [] ############################## # MISC PUBLIC METHODS # ############################## def copy(self): return copy.deepcopy(self) ############################## # MISC PRIVATE METHODS # ############################## def _format_value(self, field, value): if isinstance(value, int) and field in self._int_format: value = self._unicode(("%%%sd" % self._int_format[field]) % value) elif isinstance(value, float) and field in self._float_format: value = self._unicode(("%%%sf" % self._float_format[field]) % value) return self._unicode(value) def _compute_widths(self, rows, options): if options["header"]: widths = [_get_size(field)[0] for field in self._field_names] else: widths = len(self.field_names) * [0] for row in rows: for index, value in enumerate(row): fieldname = self.field_names[index] if fieldname in self.max_width: widths[index] = max(widths[index], min(_get_size(value)[0], self.max_width[fieldname])) else: widths[index] = max(widths[index], _get_size(value)[0]) self._widths = widths def _get_padding_widths(self, options): if options["left_padding_width"] is not None: lpad = options["left_padding_width"] else: lpad = options["padding_width"] if options["right_padding_width"] is not None: rpad = options["right_padding_width"] else: rpad = options["padding_width"] return lpad, rpad def _get_rows(self, options): """Return only those data rows that should be printed, based on slicing and sorting. Arguments: options - dictionary of option settings.""" # Make a copy of only those rows in the slice range rows = copy.deepcopy(self._rows[options["start"]:options["end"]]) # Sort if necessary if options["sortby"]: sortindex = self._field_names.index(options["sortby"]) # Decorate rows = [[row[sortindex]]+row for row in rows] # Sort rows.sort(reverse=options["reversesort"], key=options["sort_key"]) # Undecorate rows = [row[1:] for row in rows] return rows def _format_row(self, row, options): return [self._format_value(field, value) for (field, value) in zip(self._field_names, row)] def _format_rows(self, rows, options): return [self._format_row(row, options) for row in rows] ############################## # PLAIN TEXT STRING METHODS # ############################## def get_string(self, **kwargs): """Return string representation of table in current state. Arguments: start - index of first data row to include in output end - index of last data row to include in output PLUS ONE (list slice style) fields - names of fields (columns) to include header - print a header showing field names (True or False) border - print a border around the table (True or False) hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE vrules - controls printing of vertical rules between columns. 
Allowed values: FRAME, ALL, NONE int_format - controls formatting of integer data float_format - controls formatting of floating point data padding_width - number of spaces on either side of column data (only used if left and right paddings are None) left_padding_width - number of spaces on left hand side of column data right_padding_width - number of spaces on right hand side of column data vertical_char - single character string used to draw vertical lines horizontal_char - single character string used to draw horizontal lines junction_char - single character string used to draw line junctions sortby - name of field to sort rows by sort_key - sorting key function, applied to data points before sorting reversesort - True or False to sort in descending or ascending order print empty - if True, stringify just the header for an empty table, if False return an empty string """ options = self._get_options(kwargs) lines = [] # Don't think too hard about an empty table # Is this the desired behaviour? Maybe we should still print the header? if self.rowcount == 0 and (not options["print_empty"] or not options["border"]): return "" # Get the rows we need to print, taking into account slicing, sorting, etc. rows = self._get_rows(options) # Turn all data in all rows into Unicode, formatted as desired formatted_rows = self._format_rows(rows, options) # Compute column widths self._compute_widths(formatted_rows, options) # Add header or top of border self._hrule = self._stringify_hrule(options) if options["header"]: lines.append(self._stringify_header(options)) elif options["border"] and options["hrules"] in (ALL, FRAME): lines.append(self._hrule) # Add rows for row in formatted_rows: lines.append(self._stringify_row(row, options)) # Add bottom of border if options["border"] and options["hrules"] == FRAME: lines.append(self._hrule) return self._unicode("\n").join(lines) def _stringify_hrule(self, options): if not options["border"]: return "" lpad, rpad = self._get_padding_widths(options) if options['vrules'] in (ALL, FRAME): bits = [options["junction_char"]] else: bits = [options["horizontal_char"]] # For tables with no data or fieldnames if not self._field_names: bits.append(options["junction_char"]) return "".join(bits) for field, width in zip(self._field_names, self._widths): if options["fields"] and field not in options["fields"]: continue bits.append((width+lpad+rpad)*options["horizontal_char"]) if options['vrules'] == ALL: bits.append(options["junction_char"]) else: bits.append(options["horizontal_char"]) if options["vrules"] == FRAME: bits.pop() bits.append(options["junction_char"]) return "".join(bits) def _stringify_header(self, options): bits = [] lpad, rpad = self._get_padding_widths(options) if options["border"]: if options["hrules"] in (ALL, FRAME): bits.append(self._hrule) bits.append("\n") if options["vrules"] in (ALL, FRAME): bits.append(options["vertical_char"]) else: bits.append(" ") # For tables with no data or field names if not self._field_names: if options["vrules"] in (ALL, FRAME): bits.append(options["vertical_char"]) else: bits.append(" ") for field, width, in zip(self._field_names, self._widths): if options["fields"] and field not in options["fields"]: continue if self._header_style == "cap": fieldname = field.capitalize() elif self._header_style == "title": fieldname = field.title() elif self._header_style == "upper": fieldname = field.upper() elif self._header_style == "lower": fieldname = field.lower() else: fieldname = field bits.append(" " * lpad + 
self._justify(fieldname, width, self._align[field]) + " " * rpad) if options["border"]: if options["vrules"] == ALL: bits.append(options["vertical_char"]) else: bits.append(" ") # If vrules is FRAME, then we just appended a space at the end # of the last field, when we really want a vertical character if options["border"] and options["vrules"] == FRAME: bits.pop() bits.append(options["vertical_char"]) if options["border"] and options["hrules"] != NONE: bits.append("\n") bits.append(self._hrule) return "".join(bits) def _stringify_row(self, row, options): for index, field, value, width, in zip(range(0,len(row)), self._field_names, row, self._widths): # Enforce max widths lines = value.split("\n") new_lines = [] for line in lines: if _str_block_width(line) > width: line = textwrap.fill(line, width) new_lines.append(line) lines = new_lines value = "\n".join(lines) row[index] = value row_height = 0 for c in row: h = _get_size(c)[1] if h > row_height: row_height = h bits = [] lpad, rpad = self._get_padding_widths(options) for y in range(0, row_height): bits.append([]) if options["border"]: if options["vrules"] in (ALL, FRAME): bits[y].append(self.vertical_char) else: bits[y].append(" ") for field, value, width, in zip(self._field_names, row, self._widths): valign = self._valign[field] lines = value.split("\n") dHeight = row_height - len(lines) if dHeight: if valign == "m": lines = [""] * (dHeight // 2) + lines + [""] * (dHeight - (dHeight // 2)) elif valign == "b": lines = [""] * dHeight + lines else: lines = lines + [""] * dHeight y = 0 for l in lines: if options["fields"] and field not in options["fields"]: continue bits[y].append(" " * lpad + self._justify(l, width, self._align[field]) + " " * rpad) if options["border"]: if options["vrules"] == ALL: bits[y].append(self.vertical_char) else: bits[y].append(" ") y += 1 # If vrules is FRAME, then we just appended a space at the end # of the last field, when we really want a vertical character for y in range(0, row_height): if options["border"] and options["vrules"] == FRAME: bits[y].pop() bits[y].append(options["vertical_char"]) if options["border"] and options["hrules"]== ALL: bits[row_height-1].append("\n") bits[row_height-1].append(self._hrule) for y in range(0, row_height): bits[y] = "".join(bits[y]) return "\n".join(bits) ############################## # HTML STRING METHODS # ############################## def get_html_string(self, **kwargs): """Return string representation of HTML formatted version of table in current state. Arguments: start - index of first data row to include in output end - index of last data row to include in output PLUS ONE (list slice style) fields - names of fields (columns) to include header - print a header showing field names (True or False) border - print a border around the table (True or False) hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE vrules - controls printing of vertical rules between columns. 
Allowed values: FRAME, ALL, NONE int_format - controls formatting of integer data float_format - controls formatting of floating point data padding_width - number of spaces on either side of column data (only used if left and right paddings are None) left_padding_width - number of spaces on left hand side of column data right_padding_width - number of spaces on right hand side of column data sortby - name of field to sort rows by sort_key - sorting key function, applied to data points before sorting attributes - dictionary of name/value pairs to include as HTML attributes in the <table> tag xhtml - print <br/> tags if True, <br> tags if false""" options = self._get_options(kwargs) if options["format"]: string = self._get_formatted_html_string(options) else: string = self._get_simple_html_string(options) return string def _get_simple_html_string(self, options): lines = [] if options["xhtml"]: linebreak = "<br/>" else: linebreak = "<br>" open_tag = [] open_tag.append("<table") if options["attributes"]: for attr_name in options["attributes"]: open_tag.append(" %s=\"%s\"" % (attr_name, options["attributes"][attr_name])) open_tag.append(">") lines.append("".join(open_tag)) # Headers if options["header"]: lines.append(" <tr>") for field in self._field_names: if options["fields"] and field not in options["fields"]: continue lines.append(" <th>%s</th>" % escape(field).replace("\n", linebreak)) lines.append(" </tr>") # Data rows = self._get_rows(options) formatted_rows = self._format_rows(rows, options) for row in formatted_rows: lines.append(" <tr>") for field, datum in zip(self._field_names, row): if options["fields"] and field not in options["fields"]: continue lines.append(" <td>%s</td>" % escape(datum).replace("\n", linebreak)) lines.append(" </tr>") lines.append("</table>") return self._unicode("\n").join(lines) def _get_formatted_html_string(self, options): lines = [] lpad, rpad = self._get_padding_widths(options) if options["xhtml"]: linebreak = "<br/>" else: linebreak = "<br>" open_tag = [] open_tag.append("<table") if options["border"]: if options["hrules"] == ALL and options["vrules"] == ALL: open_tag.append(" frame=\"box\" rules=\"all\"") elif options["hrules"] == FRAME and options["vrules"] == FRAME: open_tag.append(" frame=\"box\"") elif options["hrules"] == FRAME and options["vrules"] == ALL: open_tag.append(" frame=\"box\" rules=\"cols\"") elif options["hrules"] == FRAME: open_tag.append(" frame=\"hsides\"") elif options["hrules"] == ALL: open_tag.append(" frame=\"hsides\" rules=\"rows\"") elif options["vrules"] == FRAME: open_tag.append(" frame=\"vsides\"") elif options["vrules"] == ALL: open_tag.append(" frame=\"vsides\" rules=\"cols\"") if options["attributes"]: for attr_name in options["attributes"]: open_tag.append(" %s=\"%s\"" % (attr_name, options["attributes"][attr_name])) open_tag.append(">") lines.append("".join(open_tag)) # Headers if options["header"]: lines.append(" <tr>") for field in self._field_names: if options["fields"] and field not in options["fields"]: continue lines.append(" <th style=\"padding-left: %dem; padding-right: %dem; text-align: center\">%s</th>" % (lpad, rpad, escape(field).replace("\n", linebreak))) lines.append(" </tr>") # Data rows = self._get_rows(options) formatted_rows = self._format_rows(rows, options) aligns = [] valigns = [] for field in self._field_names: aligns.append({ "l" : "left", "r" : "right", "c" : "center" }[self._align[field]]) valigns.append({"t" : "top", "m" : "middle", "b" : "bottom"}[self._valign[field]]) for row in 
formatted_rows: lines.append(" <tr>") for field, datum, align, valign in zip(self._field_names, row, aligns, valigns): if options["fields"] and field not in options["fields"]: continue lines.append(" <td style=\"padding-left: %dem; padding-right: %dem; text-align: %s; vertical-align: %s\">%s</td>" % (lpad, rpad, align, valign, escape(datum).replace("\n", linebreak))) lines.append(" </tr>") lines.append("</table>") return self._unicode("\n").join(lines) ############################## # UNICODE WIDTH FUNCTIONS # ############################## def _char_block_width(char): # Basic Latin, which is probably the most common case #if char in xrange(0x0021, 0x007e): #if char >= 0x0021 and char <= 0x007e: if 0x0021 <= char <= 0x007e: return 1 # Chinese, Japanese, Korean (common) if 0x4e00 <= char <= 0x9fff: return 2 # Hangul if 0xac00 <= char <= 0xd7af: return 2 # Combining? if unicodedata.combining(uni_chr(char)): return 0 # Hiragana and Katakana if 0x3040 <= char <= 0x309f or 0x30a0 <= char <= 0x30ff: return 2 # Full-width Latin characters if 0xff01 <= char <= 0xff60: return 2 # CJK punctuation if 0x3000 <= char <= 0x303e: return 2 # Backspace and delete if char in (0x0008, 0x007f): return -1 # Other control characters elif char in (0x0000, 0x001f): return 0 # Take a guess return 1 def _str_block_width(val): return sum(itermap(_char_block_width, itermap(ord, _re.sub("", val)))) ############################## # TABLE FACTORIES # ############################## def from_csv(fp, field_names = None, **kwargs): dialect = csv.Sniffer().sniff(fp.read(1024)) fp.seek(0) reader = csv.reader(fp, dialect) table = PrettyTable(**kwargs) if field_names: table.field_names = field_names else: table.field_names = [x.strip() for x in next(reader)] for row in reader: table.add_row([x.strip() for x in row]) return table def from_db_cursor(cursor, **kwargs): if cursor.description: table = PrettyTable(**kwargs) table.field_names = [col[0] for col in cursor.description] for row in cursor.fetchall(): table.add_row(row) return table class TableHandler(HTMLParser): def __init__(self, **kwargs): HTMLParser.__init__(self) self.kwargs = kwargs self.tables = [] self.last_row = [] self.rows = [] self.max_row_width = 0 self.active = None self.last_content = "" self.is_last_row_header = False def handle_starttag(self,tag, attrs): self.active = tag if tag == "th": self.is_last_row_header = True def handle_endtag(self,tag): if tag in ["th", "td"]: stripped_content = self.last_content.strip() self.last_row.append(stripped_content) if tag == "tr": self.rows.append( (self.last_row, self.is_last_row_header)) self.max_row_width = max(self.max_row_width, len(self.last_row)) self.last_row = [] self.is_last_row_header = False if tag == "table": table = self.generate_table(self.rows) self.tables.append(table) self.rows = [] self.last_content = " " self.active = None def handle_data(self, data): self.last_content += data def generate_table(self, rows): """ Generates from a list of rows a PrettyTable object. 
""" table = PrettyTable(**self.kwargs) for row in self.rows: if len(row[0]) < self.max_row_width: appends = self.max_row_width - len(row[0]) for i in range(1,appends): row[0].append("-") if row[1] == True: self.make_fields_unique(row[0]) table.field_names = row[0] else: table.add_row(row[0]) return table def make_fields_unique(self, fields): """ iterates over the row and make each field unique """ for i in range(0, len(fields)): for j in range(i+1, len(fields)): if fields[i] == fields[j]: fields[j] += "'" def from_html(html_code, **kwargs): """ Generates a list of PrettyTables from a string of HTML code. Each <table> in the HTML becomes one PrettyTable object. """ parser = TableHandler(**kwargs) parser.feed(html_code) return parser.tables def from_html_one(html_code, **kwargs): """ Generates a PrettyTables from a string of HTML code which contains only a single <table> """ tables = from_html(html_code, **kwargs) try: assert len(tables) == 1 except AssertionError: raise Exception("More than one <table> in provided HTML code! Use from_html instead.") return tables[0] ############################## # MAIN (TEST FUNCTION) # ############################## def main(): x = PrettyTable(["City name", "Area", "Population", "Annual Rainfall"]) x.sortby = "Population" x.reversesort = True x.int_format["Area"] = "04d" x.float_format = "6.1f" x.align["City name"] = "l" # Left align city names x.add_row(["Adelaide", 1295, 1158259, 600.5]) x.add_row(["Brisbane", 5905, 1857594, 1146.4]) x.add_row(["Darwin", 112, 120900, 1714.7]) x.add_row(["Hobart", 1357, 205556, 619.5]) x.add_row(["Sydney", 2058, 4336374, 1214.8]) x.add_row(["Melbourne", 1566, 3806092, 646.9]) x.add_row(["Perth", 5386, 1554769, 869.4]) print(x) if __name__ == "__main__": main()
the-stack_106_30881
# coding: utf-8 # ## ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ import # python recommendation.py 2 EatingFood,Drinking,Watch ๊ด‘์ง„๊ตฌ 37.5505441 127.0722199 import matplotlib.pyplot as plt import matplotlib.image as mpimg import pandas as pd import numpy as np from urllib.parse import urlencode, quote_plus, unquote import json from urllib.request import * from xml.etree.ElementTree import * from math import * import datetime import pytz import datetime import time import re from bs4 import BeautifulSoup from selenium import webdriver from selenium.common.exceptions import NoSuchElementException import sys import csv # ## ํ˜„์žฌ ์œ„์น˜ ๊ฐ€์ ธ์˜ค๊ธฐ # In[57]: def get_location(): driver.get("https://www.google.co.kr/maps") new_url = driver.current_url while '/@' not in new_url: time.sleep(0.5) new_url = driver.current_url driver.find_element_by_xpath("//button[@aria-label='๋‚ด ์œ„์น˜ ๋ณด๊ธฐ']").click() while new_url == driver.current_url: time.sleep(0.3) new_url = driver.current_url while int(new_url.split("/@")[1].split("z")[0].split(",")[2]) < 20: old_url = new_url driver.find_element_by_xpath("//button[@aria-label='ํ™•๋Œ€']").click() driver.implicitly_wait(3) while old_url == driver.current_url: time.sleep(0.3) new_url = driver.current_url print(new_url) coordinate = new_url.split('/@')[1].split(',')[0:2] driver.find_element_by_id("searchboxinput").send_keys(",".join(coordinate)) driver.find_element_by_id("searchbox-searchbutton").click() driver.implicitly_wait(3) while True: time.sleep(0.5) new_url = driver.current_url print(new_url) if "search" in new_url: try: driver.find_element_by_class_name("section-result") except NoSuchElementException: print("redirecting") elif "place" in new_url: try: place = driver.find_element_by_xpath("//span[@class='widget-pane-link']") except NoSuchElementException: print("loading") time.sleep(0.2) else: break coordinate.reverse() address = place.text.split(' ') for s in address: if s[-1] == '๊ตฌ': coordinate.append(s) break return coordinate # ## ๋‚ ์”จ ๊ฐ€์ ธ์˜ค๋Š” ํ•จ์ˆ˜๋“ค # In[60]: def get_weather_data(longi, latit, key="......."): #๋ถ€๋ถ„์— ๋‹น์‹ ์˜ ๋‚ ์”จ APIํ‚ค๋ฅผ ์ž…๋ ฅํ•˜๊ณ  grib_date, grib_time = get_grib_date() grib_url = "http://newsky2.kma.go.kr/service/SecndSrtpdFrcstInfoService2/ForecastGrib?" forecast_date, forecast_time = get_forecast_date() forecast_url = "http://newsky2.kma.go.kr/service/SecndSrtpdFrcstInfoService2/ForecastTimeData?" 
nx, ny = longitude_and_latitude_to_cartesian(longi, latit) grib = urlopen(grib_url+"serviceKey="+key+"&base_date="+grib_date+"&base_time="+grib_time+"&nx="+nx+"&ny="+ny).read().decode("utf-8") forecast = urlopen(forecast_url+"serviceKey="+key+"&base_date="+forecast_date+"&base_time="+forecast_time+"&nx="+nx+"&ny="+ny).read().decode("utf-8") grib_data = fromstring(grib) # ์ดˆ๋‹จ๊ธฐ ์‹คํ™ฉ forecast_data = fromstring(forecast) # ์ดˆ๋‹จ๊ธฐ ์˜ˆ๋ณด # ์ดˆ๋‹จ๊ธฐ ๋‚ ์”จ ์ƒํ™ฉ์—๋Š” ์ •๋ณด ๋ˆ„๋ฝ์ด ์žˆ์„ ์ˆ˜ ์žˆ์Œ data_parsed = {"T1H":"None", "RN1":"None", "PTY":"None", "SKY":"None", "LGT":"None", "WSD":"None"} for item in forecast_data[1][0]: if item[2].text in list(data_parsed.keys()): data_parsed[item[2].text] = item[5].text for item in grib_data[1][0]: if item[2].text in list(data_parsed.keys()): data_parsed[item[2].text] = item[5].text return data_parsed # In[61]: def get_grib_date(): now = datetime.datetime.now(tz=pytz.timezone("Asia/Seoul")) date_now = now.strftime("%Y%m%d") time_now = now.strftime("%H") + "00" return date_now, time_now # In[62]: def get_forecast_date(): now = datetime.datetime.now(tz=pytz.timezone("Asia/Seoul")) min_chk = datetime.datetime.now(tz=pytz.timezone("Asia/Seoul")).strftime("%M") if int(min_chk) < 30: now -= datetime.timedelta(hours=1) date_now = now.strftime("%Y%m%d") time_now = now.strftime("%H") + "30" return date_now, time_now # In[63]: def longitude_and_latitude_to_cartesian(longitude, latitude): lon = float(longitude) lat = float(latitude) _Re = 6371.00877 _grid = 5 _slat1 = 30 _slat2 = 60 _olon = 126 _olat = 38 _xo = 210 / _grid _yo = 675 / _grid DEGRAD = pi / 180.0 RADDEG = 180.0 / pi re = _Re / _grid slat1 = _slat1 * DEGRAD slat2 = _slat2 * DEGRAD olon = _olon * DEGRAD olat = _olat * DEGRAD sn = tan(pi * 0.25 + slat2 * 0.5) / tan(pi * 0.25 + slat1 * 0.5) sn = log(cos(slat1) / cos(slat2)) / log(sn) sf = tan(pi * 0.25 + slat1 * 0.5) tmp = sf ** sn if sf > 0 else -(abs(sf) ** sn) sf = tmp * cos(slat1) / sn ro = tan(pi * 0.25 + olat * 0.5) ro = re * sf / (ro ** sn if ro > 0 else -(abs(ro) ** sn)) ra = tan(pi * 0.25 + lat * DEGRAD * 0.5) ra = re * sf / (ra ** sn if ra > 0 else -(abs(ra) ** sn)) theta = lon * DEGRAD - olon if theta > pi: theta -= 2.0 * pi if theta < -pi: theta += 2.0 * pi theta *= sn x = ra * sin(theta) + _xo y = (ro - ra * cos(theta)) + _yo return str(int(x+1.5)), str(int(y+1.5)) # ## ๋ฏธ์„ธ๋จผ์ง€ ๊ฐ€์ ธ์˜ค๊ธฐ def get_fine_dict_data(service_key, stationName): decode_key = unquote(service_key) queryParams = '?' 
+ urlencode({ quote_plus('ServiceKey') : decode_key, quote_plus('stationName'): stationName, quote_plus('dataTerm'): 'daily', quote_plus('_returnType'): 'json', quote_plus('ver'): '1.3', quote_plus('pageNo'): 1}) request = Request(url + queryParams) request.get_method = lambda: 'GET' response_body = urlopen(request).read() response_body = json.loads(response_body) return response_body['list'][0]['pm10Grade'],response_body['list'][0]['pm25Grade'], response_body['list'][0]['dataTime'] # ### ๋‚ ์”จ ๋ฐ ๋ฏธ์„ธ๋จผ์ง€ ๋ณ€์ˆ˜๋งŒ๋“ค๊ธฐ # ### ๋‚ ์”จ : weather_dict , ๋ฏธ์„ธ๋จผ์ง€ : fine_dust , ์ดˆ๋ฏธ์„ธ๋จผ์ง€ : Ultrafine_dust , ๋ฏธ์„ธ๋จผ์ง€ ์‹œ๊ฐ„ : dataTime # # #### weather_dict # # | ๋“ฑ๊ธ‰ | ์ข‹์Œ | ๋ณดํ†ต | ๋‚˜์จ | ๋งค์šฐ๋‚˜์จ | # # | Grade | 1 | 2 | 3 | 4 | # # #### fine_dust # # T1H : ๊ธฐ์˜จ # # RN1 : 1์‹œ๊ฐ„ ๊ฐ•์ˆ˜๋Ÿ‰ # # (0 - 0mm ๋˜๋Š” ์—†์Œ) # # (1 - 1mm ๋ฏธ๋งŒ) # # (5 - 1~4mm) # # (10 - 5~9mm) # # (20 - 10~19mm) # # (40 - 20~39mm) # # (70 - 40~69mm) # # (100 - 70mm ์ด์ƒ) # # PTY : ๊ฐ•์ˆ˜ํ˜•ํƒœ -- 0 - ์—†์Œ , 1 - ๋น„ , 2 - ์ง„๋ˆˆ๊นจ๋น„ , 3 - ๋ˆˆ # # SKY : ํ•˜๋Š˜์ƒํƒœ -- 1 - ๋ง‘์Œ , 2 - ๊ตฌ๋ฆ„์กฐ๊ธˆ , 3 - ๊ตฌ๋ฆ„๋งŽ์Œ , 4 - ํ๋ฆผ # # LGT : ๋‚™๋ขฐ -- 0 - ์—†์Œ , 1 - ์žˆ์Œ # # ๋˜๋Š” 0 - ์—†์Œ , 1 - ๋‚ฎ์Œ , 2 - ๋ณดํ†ต , 3 - ๋†’์Œ # # WSD : ํ’์† # ## Open์‹œ๊ฐ„ ํ•„ํ„ฐ๋ง # In[69]: #Find_Hours_of_use("10AM-11PM") # In[70]: def Find_Hours_of_use(zz): p1 = re.compile('(AM)+') m1 = p1.search(zz) p2 = re.compile('(PM)+') m2 = p2.search(zz) p3 = re.compile('(:)+') m3 = p3.search(zz) p5 = re.compile('(-)+') m5 = p5.search(zz) #print(zz) i11 = int(re.findall('\d+', str(m1))[0]) i12 = int(re.findall('\d+', str(m1))[1]) i21 = int(re.findall('\d+', str(m2))[0]) i22 = int(re.findall('\d+', str(m2))[1]) i51 = int(re.findall('\d+', str(m5))[0]) i52 = int(re.findall('\d+', str(m5))[1]) if(m3): i31 = int(re.findall('\d+', str(m3))[0]) i32 = int(re.findall('\d+', str(m3))[1]) z2=zz if(i31<i51): z2 = zz[i32:] p4 = re.compile('(:)+') m4 = p4.search(z2) if(m4): i41 = int(re.findall('\d+', str(m4))[0]) i42 = int(re.findall('\d+', str(m4))[1]) p5 = re.compile('(-)+') m5 = p5.search(z2) else: i41 = None i42 = None else: i31 = None i32 = None z2 = None i41 = None i42 = None i51 = int(re.findall('\d+', str(m5))[0]) i52 = int(re.findall('\d+', str(m5))[1]) if (i31 and i31<i51): openss = zz[:i11] opensss = int(openss[:i31])*60 + int(openss[i32:]) else: opensss = int(zz[:i11])*60 if i41: closs = z2[i52:] i41 -= len(z2)-len(closs) i42 -= len(z2)-len(closs) closss = int(closs[:i41])*60 + int(closs[i42:-2]) else: closss = int(zz[i52:-2])*60 return opensss,closss+12*60 # In[71]: def Make_Hours_of_use(timessss,p): ########################## #if(timessss in cp): # return -1 ########################## if(timessss!="Open 24 hours" and timessss!="Closed"): return (Find_Hours_of_use(timessss)[p]) elif(timessss=="Open 24 hours"): return (p*24*60) elif(timessss=="Closed"): return (-1) def Hople_Places(place): hopedf = df[df["Function"] == "Fsdfsd"] places = place for p in places: onehope = df[df["Function"] == p] hopedf = hopedf.append(onehope) return hopedf # In[131]: def Make_Hope_Places(places): hopedf = df[df["Function"] == "Fsdfsd"] for place in places: hdf = Hople_Places(place) hopedf = hopedf.append(hdf) return hopedf #์œ„๋„์˜ 1๋„๊ฐ„ ๊ฑฐ๋ฆฌ๋Š” 114.6km์ด๊ณ  ๊ฒฝ๋„์˜ 1๋„๊ฐ„ ๊ฑฐ๋ฆฌ๋Š” 88km๊ฐ€ ๋œ๋‹ค. 
def KM_To_Longitude_Latitude(num, clas): if(clas == "Latitude"): #์œ„๋„ return num / 114.6 elif(clas == "Longitude"): #๊ฒฝ๋„ return num / 88 else: return 0 # In[138]: def Distance_filtering(df,lo,la,Odistance,Adistance): df1 = df[(lo-Odistance <= df['Longitude'] ) & (df['Longitude'] <=lo+Odistance )] df2 = df1[(la-Adistance <= df['Latitude'] ) & (df['Latitude'] <=la+Adistance )] return df2 #์œ„๋„์˜ 1๋„๊ฐ„ ๊ฑฐ๋ฆฌ๋Š” 114.6km์ด๊ณ  ๊ฒฝ๋„์˜ 1๋„๊ฐ„ ๊ฑฐ๋ฆฌ๋Š” 88km๊ฐ€ ๋œ๋‹ค. def Longitude_Latitude_To_KM(num, clas): if(clas == "Latitude"): #์œ„๋„ return num * 114.6 elif(clas == "Longitude"): #๊ฒฝ๋„ return num * 88 else: return 0 if __name__ == "__main__": inputs = [] for i in range(1,6): try: inputs.append(sys.argv[i]) except: inputs.append(0) # ์žฅ์†Œ, ๊ตฌ, ์œ„๋„, ๊ฒฝ๋„ #for i in range(0,5): # print(i, inputs[i]) #์œ„์น˜ ๊ฐ€์ง€๊ณ  ์˜ค๊ธฐ if(not(inputs[4])): driver = webdriver.Chrome('chromedriver.exe') Current_location = get_location() driver.close() Longitude, Latitude, stationName = Current_location Longitude, Latitude = float(Longitude), float(Latitude) else: stationName = inputs[2] Longitude, Latitude = float(inputs[4]), float(inputs[3]) url = 'http://openapi.airkorea.or.kr/openapi/services/rest/ArpltnInforInqireSvc/getMsrstnAcctoRltmMesureDnsty' service_key = '.......' # APIํ‚ค๊ฐ€ ํ•„์š” stationName = stationName #๋ฏธ์„ธ๋จผ์ง€ weather_dict = get_weather_data(Longitude, Latitude) fine_dust, Ultrafine_dust, dataTime = get_fine_dict_data(service_key, stationName) df = pd.read_csv("Seoul_Place.csv") # # MAIN CODE t = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'] tt = datetime.datetime.now() n = time.localtime().tm_wday opentime=[] closetime=[] for dftn in df[t[n]]: opentime.append(Make_Hours_of_use(dftn,0)) closetime.append(Make_Hours_of_use(dftn,1)) df["opentime"]=opentime df["closetime"]=closetime now = tt.hour*60 + tt.minute df= df[(df["opentime"]<now) & (now<df["closetime"])] # ## ๋‚ ์”จ ํ•„ํ„ฐ๋ง try: fine_dust = int(fine_dust) except: fine_dust=0 try: Ultrafine_dust= int(Ultrafine_dust) except: Ultrafine_dust= 0 try: T1H = int(weather_dict["T1H"]) except: T1H=0 try: RN1 = float(weather_dict["RN1"]) except: RN1=0 try: PTY = int(weather_dict["PTY"]) except: PTY = 0 try: SKY = int(weather_dict["SKY"]) except: SKY = 0 try: LGT = int(weather_dict["LGT"]) except: LGT = 0 try: WSD = int(weather_dict["WSD"]) except: WSD = 0 if (fine_dust>=3 or Ultrafine_dust>=3 or T1H<-5 or 30 <T1H or RN1>=1 or PTY>=1): df = df[df["Indoor/Outdoors"]!="Outdoor"] if(fine_dust==4 or Ultrafine_dust==4 or LGT >=1 or WSD >=100or T1H<-15 or 37 <T1H or RN1>=10): df = df[df["Indoor/Outdoors"]=="Indoor"] # ## ์›ํ•˜๋Š” ์žฅ์†Œ๋กœ ํ•„ํ„ฐ๋ง # ์Œ์‹๊ฐ™์€๊ฑฐ๋ฉด ์ข€ ๋” ์ข€๊ฒŒ , ์˜๋ฏธ์žˆ๋Š” ๊ณณ์€ ๋ฉ€๋ฆฌ, ๋„ˆ๋ฌด ๋ฉ€๋ฆฌ์žˆ์œผ๋ฉด # ์ผ€์ด์Šค๋„ ๋งŒ๋“ค์–ด์„œ ํ—ค๋ณด๊ฒŒ ๋ณด๊ธฐ # ๋ฐ์ดํ„ฐ์˜ ํ•œ๊ณ„์  ๊นŒ์ง€ -> ๊ฐ•๋‚จ๊ตฌ์ฒญ์—์„œ # ๊ฑฐ๋ฆฌ ์„ค์ • -> ok #1.์Œ์‹ ๋จน๊ธฐ EatingFood = [ #๊ฐ€๊นŒ์šด '๋”ค์„ฌ ์ „๋ฌธ ๋ ˆ์Šคํ† ๋ž‘', '์Œ์‹์ ', '์ดํƒˆ๋ฆฌ์•„ ์Œ์‹์ ', '์ผ๋ณธ ์Œ์‹์ ', '์ค‘๊ตญ ์Œ์‹์ ', 'ํŒจ๋ฐ€๋ฆฌ ๋ ˆ์Šคํ† ๋ž‘', 'ํ”„๋ž‘์Šค ์Œ์‹์ ', 'ํ•œ๊ตญ์‹ ์†Œ๊ณ ๊ธฐ ์ „๋ฌธ ์Œ์‹์ ', 'ํ•œ์‹ ๊ณ ๊ธฐ๊ตฌ์ด ๋ ˆ์Šคํ† ๋ž‘' ] #2. ๋งˆ์‹œ๊ธฐ Drinking = [ '๋ฐ” & ๊ทธ๋ฆด', #๊ฐ€๊นŒ์šด '์ˆ ์ง‘', '์™€์ธ ๋ฐ”', '์žฌ์ฆˆ๋ฐ”', '์นดํŽ˜', '์นตํ…Œ์ผ๋ฐ”', ] ##### #3. ์˜๋ฏธ์žˆ๋Š” ๊ณณ MeaningfulPlace = ['๋ฌธํ™”์œ ์‚ฐ๋ณด์กด์ง€์—ญ', #๋ฉ€๋ฆฌ '๋ถˆ๊ต์‚ฌ์ฐฐ', '์„ฑ๋‹น', '์—ญ์‚ฌ์œ ์ ์ง€', '์—ญ์‚ฌ์  ๋ช…์†Œ', '์ •๋ถ€์ฒญ์‚ฌ' ] #4. 
๋ฐ– ์ธ๊ณต,์ž์—ฐ Outdoors = ['๊ณ ๊ถ', #๋ฉ€๋ฆฌ '๊ด€๊ด‘๋ช…์†Œ', '๊ด€๊ด‘์ง€', '๋‹ค๋ฆฌ', '๋Œ€๊ด‘์žฅ', 'ํƒ€์›Œ', '๊ณต์›', '๊ตญ๋ฆฝ๊ณต์›', '์‚ฐ' ] #5. ์•‰์•„์„œ ๋ณด๊ณ  ๋“ฃ๊ธฐ SeeAndHear=['๊ณต์—ฐ์˜ˆ์ˆ  ๊ทน์žฅ', #๊ฐ€๊นŒ์šด '๊ทน์žฅ', '์˜ํ™”๊ด€', '์ž๋™์ฐจ๊ทน์žฅ' ] #6. ๊ตฌ๊ฒฝํ•˜๊ธฐ Watch =['๋ฏธ์ˆ ๊ด€', #๋ฉ€๋ฆฌ '๋ฐ•๋ฌผ๊ด€' ] #7. ๋…ธ๋ž˜๋ฐฉ Karaoke = ['๋…ธ๋ž˜๋ฐฉ' ] #๊ฐ€๊นŒ์šด #7. ๋†€์ด๊ณต์› AmusementPark = ['๋†€์ด๊ณต์›'] #๋ฉ€๋ฆฌ #8. ์‡ผํ•‘ํ•˜๊ธฐ Shopping=['์‡ผํ•‘๋ชฐ', #๋ฉ€๋ฆฌ '์‹œ๊ฐ€ ์ „๋ฌธ์ ', '์‹œ์žฅ', '์ปคํ”ผ์šฉํ’ˆ ํŒ๋งค์ ', ] #9. ์šด๋™ํ•˜๊ธฐ Exercise = ['๊ฑด๊ฐ• ์„ผํ„ฐ', #๊ฐ€๊นŒ์šด '์Šคํฌ์ธ  ๋‹จ์ง€', '์Šคํฌ์ธ ๋‹จ์ง€', '๋ฌธํ™”์„ผํ„ฐ' ] Close_Far = 1 #์žฅ์†Œ ์„ ์ • if(inputs[1]): places = [] places_name = inputs[1].split(',') for pl in places_name: if(pl == "EatingFood"): places.append(EatingFood) Close_Far = 0 elif(pl == "Drinking"): places.append(Drinking) Close_Far = 0 elif(pl == "MeaningfulPlace"): places.append(MeaningfulPlace) elif(pl == "Outdoors"): places.append(Outdoors) elif(pl == "SeeAndHear"): places.append(SeeAndHear) Close_Far = 0 elif(pl == "Watch"): places.append(Watch) elif(pl == "Karaoke"): places.append(Karaoke) Close_Far = 0 elif(pl == "AmusementPark"): places.append(AmusementPark) elif(pl == "Shopping"): places.append(Shopping) elif(pl == "Exercise"): places.append(Exercise) Close_Far = 0 else: places = [EatingFood,Drinking,MeaningfulPlace,Outdoors,SeeAndHear,Watch,Play,Shopping,Exercise,Welfare] #print(places) hopedf = Make_Hope_Places(places) df = hopedf #print(df) O = KM_To_Longitude_Latitude(1, "Longitude") A = KM_To_Longitude_Latitude(1, "Latitude") lims = 0 while 1: Fdf = Distance_filtering(df,Longitude,Latitude,O,A) lims += 1 if 10 > len(Fdf["Name"]): O+=KM_To_Longitude_Latitude(0.1, "Longitude") A+=KM_To_Longitude_Latitude(0.1, "Longitude") elif lims < 5: break else: break print(len(Fdf["Name"])) Fdf = Fdf.fillna(0) # ## Socre ๊ณ„์‚ฐ Score = list(Fdf["Score"]) while(1): try: Score.remove(0) except: break # ### ์ดˆ๊ธฐํ™” # 0์ ์œผ๋กœ ์ฆ‰ ๋น„์—ฌ์žˆ๋Š” ๊ฐ’๋“ค์„ ์ œ์ผ ๋‚ฎ์€ ์ ์ˆ˜ -1๋กœ ๋ฐ”๊พธ๊ธฐ zidx = Fdf[Fdf["Score"]==0].index.tolist() ms = min(Score)-1 for i in zidx: Fdf.loc[i,'Score'] = ms # ### Score ๊ฑฐ๋ฆฌ ๊ธฐ๋ฐ˜์œผ๋กœ ๋ณ€๊ฒฝ idx = Fdf.index.tolist() for i in idx: Fdf.loc[i,'Score'] = Fdf.loc[i,'Score'] + (abs(Longitude - Fdf.loc[i,'Longitude'])) + (abs(Latitude - Fdf.loc[i,'Latitude'])) # ### Score ์ถ”์ถœ ๋ฐ ํ™•๋ฅ ํ™” roulette_dic = {} for i in idx: roulette_dic[i] = Fdf.loc[i,'Score'] r_score_l = list(roulette_dic.values()) len(r_score_l), sum(r_score_l) for i,s in roulette_dic.items(): roulette_dic[i]= s/sum(r_score_l) r_index_l = list(roulette_dic) # ### ์ตœ์ข… ์ถ”์ฒœ ์žฅ์†Œ ๋ฝ‘๊ธฐ ๋ฐ ์ถœ๋ ฅ #๋ฝ‘์„ ์ˆซ์ž if(inputs[0]): ref_num = int(inputs[0]) else: ref_num = 3 f = open('output.csv', 'w', encoding='utf-8', newline='') wr = csv.writer(f) find_place_idxs = [] for i in range(ref_num): whiletest = 1 while(whiletest): find_place_idx = np.random.choice(r_index_l, p=list(roulette_dic.values())) whiletest = 0 for fpi in find_place_idxs: whiletest = 1 if(fpi == find_place_idx): whiletest = 1 break else: whiletest = 0 break find_place = Fdf.loc[find_place_idx] find_place print(find_place["Name"]) print("๋ถ„๋ฅ˜ : ",find_place["Function"]) print("์‚ฌ์ง„ : ",'image/'+find_place["Image"]) #img=mpimg.imread('image/'+find_place["Image"]) #imgplot = plt.imshow(img) #plt.show() print("์„ค๋ช… : ",find_place["Details"]) wr.writerow([find_place["Name"]]) wr.writerow([find_place["Function"]]) wr.writerow(['image/'+find_place["Image"]]) 
wr.writerow([find_place["Details"]]) f.close()
the-stack_106_30882
from dlchord2 import const
from dlchord2.accidentals import Accidentals
from dlchord2.note import Note
from dlchord2.scale import Scale


def test_create_from_index_note():
    for i in range(12):
        note = Note.create_from_index_note(i)


def test_create_from_tension():
    tensions = const.TENSION_TO_INDEX.keys()
    for tension in tensions:
        note = Note.create_from_tension(tension)


def test_transposed():
    for scale_text in const.SCALE_PATTERN.keys():
        scale_origin = Scale(scale_text)
        for note_text in const.SCALE_PATTERN[scale_text]:
            note = Note(note_text, scale_text=scale_text)

            for i in range(12):
                note_transposed = note.transposed(steps=i, scale_text=scale_text)
                note_index_origin = note.note_index
                assert (note_index_origin + i) % 12 == note_transposed.note_index
                assert scale_origin == note_transposed.scale

            for i in range(0, -12, -1):
                note_transposed = note.transposed(steps=i, scale_text=scale_text)
                note_index_origin = note.note_index
                assert (note_index_origin + i) % 12 == note_transposed.note_index
                assert scale_origin == note_transposed.scale


def test_disabled():
    note = Note("C")
    assert not note.disabled
    note.disabled = True
    assert note.disabled


def test_note_index():
    for scale_text in const.SCALE_PATTERN.keys():
        for note_text in const.SCALE_PATTERN[scale_text]:
            note = Note(note_text, scale_text=scale_text)
            note_index_origin = const.TONE_TO_INDEX[note_text]
            assert note_index_origin == note.note_index


def test_raw_note_text():
    for scale_text in const.SCALE_PATTERN.keys():
        for note_text in const.SCALE_PATTERN[scale_text]:
            note = Note(note_text, scale_text=scale_text)
            assert note_text == note.raw_note_text


def test_normed_note_text():
    for scale_text in const.SCALE_PATTERN.keys():
        for note_text in const.SCALE_PATTERN[scale_text]:
            note = Note(note_text, scale_text=scale_text)
            assert note_text == note.normed_note_text


def test_accidentals():
    for scale_text in const.SCALE_PATTERN.keys():
        for note_text in const.SCALE_PATTERN[scale_text]:
            note = Note(note_text, scale_text=scale_text)
            accidentals_origin = Accidentals(note_text[1:])
            assert accidentals_origin == note.accidentals


def test_scale():
    for scale_text in const.SCALE_PATTERN.keys():
        for note_text in const.SCALE_PATTERN[scale_text]:
            note = Note(note_text, scale_text=scale_text)
            scale_origin = Scale(scale_text)
            assert scale_origin == note.scale
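# Small sketch of the Note API exercised by the tests above. It assumes "C" is
# a valid scale_text key in const.SCALE_PATTERN; if not, substitute any key
# from that mapping.
def _note_demo():
    c = Note("C", scale_text="C")
    d = c.transposed(steps=2, scale_text="C")
    return c.note_index, d.note_index, d.normed_note_text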
the-stack_106_30884
from __future__ import print_function import boto3 import datetime import json import CloudCanvas import errors import service import fleet from cgf_utils import aws_utils from cgf_utils import custom_resource_utils from botocore.exceptions import ClientError # import errors # # raise errors.ClientError(message) - results in HTTP 400 response with message # raise errors.ForbiddenRequestError(message) - results in 403 response with message # raise errors.NotFoundError(message) - results in HTTP 404 response with message # # Any other exception results in HTTP 500 with a generic internal service error message. workflow = custom_resource_utils.get_embedded_physical_id(CloudCanvas.get_setting('Workflow')) workflow_domain_name = workflow + '-domain' workflow_type_name = workflow + '-workflow-type' swf_client = boto3.client('swf', region_name=aws_utils.current_region) dynamo_client = boto3.client('dynamodb') kvs_table = custom_resource_utils.get_embedded_physical_id(CloudCanvas.get_setting('KVS')) active_workflow_key = 'active_workflow' @service.api def post(request, body): workflow_id = body['workflow_id'] run_params = body['run_params'] if workflow_id is None or len(workflow_id) == 0: now = datetime.datetime.utcnow().replace(microsecond=0) workflow_id = "exec-%s" % now.isoformat().replace(":", ".") try: response = swf_client.start_workflow_execution( domain=workflow_domain_name, workflowId=workflow_id, workflowType={ 'name': workflow_type_name, 'version': "1.0" }, input=run_params ) response = { 'workflowId': workflow_id, 'runId': response['runId'] } dynamo_client.put_item( TableName=kvs_table, Item={ 'key': {'S': active_workflow_key}, 'value': {'S': json.dumps(response)} } ) except ClientError as e: if e.response['Error']['Code'] == "WorkflowExecutionAlreadyStartedFault": raise errors.ClientError("A workflow execution with this name already exists") else: raise e return json.dumps(response)
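# Sketch of the request body the @service.api handler above expects. The Cloud
# Gem Framework dispatcher that normally supplies "request" is not shown here,
# so None is passed purely for illustration; run_params must be a string
# because it is forwarded as the SWF execution input.
def _example_invocation():
    body = {
        'workflow_id': '',  # empty -> an exec-<timestamp> id is generated
        'run_params': '{"some": "workflow input"}',
    }
    return post(None, body)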
the-stack_106_30886
# Copyright 2019 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tfx.utils.logging_utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging import os # Standard Imports import tensorflow as tf from tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import from tfx.utils import logging_utils class LoggingUtilsTest(tf.test.TestCase): def setUp(self): self._log_root = os.path.join(self.get_temp_dir(), 'log_dir') self._logger_config = logging_utils.LoggerConfig(log_root=self._log_root) def test_logging(self): """Ensure a logged string actually appears in the log file.""" logger = logging_utils.get_logger(self._logger_config) logger.info('Test') log_file_path = os.path.join(self._log_root) f = file_io.FileIO(os.path.join(log_file_path, 'tfx.log'), mode='r') self.assertRegexpMatches( f.read(), r'^\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d - : \(logging_utils_test.py:\d\d\) - INFO: Test$' ) def test_default_settings(self): """Ensure log defaults are set correctly.""" config = logging_utils.LoggerConfig() self.assertEqual(config.log_root, '/var/tmp/tfx/logs') self.assertEqual(config.log_level, logging.INFO) self.assertEqual(config.pipeline_name, '') self.assertEqual(config.worker_name, '') def test_override_settings(self): """Ensure log overrides are set correctly.""" config = logging_utils.LoggerConfig(log_root='path', log_level=logging.WARN, pipeline_name='pipe', worker_name='wrk') self.assertEqual(config.log_root, 'path') self.assertEqual(config.log_level, logging.WARN) self.assertEqual(config.pipeline_name, 'pipe') self.assertEqual(config.worker_name, 'wrk') if __name__ == '__main__': tf.test.main()
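# Sketch of the API these tests exercise: build a LoggerConfig and fetch a
# logger that writes to <log_root>/tfx.log. The log_root path and names below
# are illustrative placeholders.
def _logging_demo():
    config = logging_utils.LoggerConfig(
        log_root='/tmp/tfx_demo_logs',
        log_level=logging.INFO,
        pipeline_name='demo_pipeline',
        worker_name='worker-0')
    logger = logging_utils.get_logger(config)
    logger.info('hello from the demo')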
the-stack_106_30887
from tests.base_test import BaseTest
from crc.scripts.enum_label import EnumLabel
from crc.api.common import ApiError


class TestGetEnumLabel(BaseTest):

    def setUp(self):
        self.load_example_data()
        self.workflow = self.create_workflow('enum_options_all')
        self.workflow_api = self.get_workflow_api(self.workflow)
        # Assure the form has been loaded at least once.
        self.task = self.workflow_api.next_task
        self.assertEqual(self.task.name, 'myFormTask')
        self.labelScript = EnumLabel()

    def test_get_enum_label_for_ldap(self):
        result = self.labelScript.do_task(self.task, self.workflow_api.study_id, self.workflow_api.id,
                                          task='myFormTask', field='ldap', value='dhf8r')
        self.assertEqual("Dan Funk", result)

    def test_get_enum_label_for_standard_enum(self):
        result = self.labelScript.do_task(self.task, self.workflow_api.study_id, self.workflow_api.id,
                                          task='myFormTask', field='standard_enum', value='one')
        self.assertEqual('1', result)

    def test_get_enum_label_using_unnamed_args(self):
        result = self.labelScript.do_task(self.task, self.workflow_api.study_id, self.workflow_api.id,
                                          'myFormTask', 'standard_enum', 'one')
        self.assertEqual('1', result)

    def test_get_enum_label_for_spreadsheet(self):
        result = self.labelScript.do_task(self.task, self.workflow_api.study_id, self.workflow_api.id,
                                          task='myFormTask', field='spreadsheet', value='2')
        self.assertEqual('T-shirts', result)

    def test_get_enum_label_for_data(self):
        result = self.labelScript.do_task(self.task, self.workflow_api.study_id, self.workflow_api.id,
                                          task='myFormTask', field='data', value='simo')
        self.assertEqual('Simo', result)

    def test_get_enum_label_for_checkbox(self):
        result = self.labelScript.do_task(self.task, self.workflow_api.study_id, self.workflow_api.id,
                                          task='myFormTask', field='checkbox', value='simo')
        self.assertEqual('Simo', result)

    def test_get_invalid_spec_name(self):
        with self.assertRaises(ApiError) as ctx:
            ldap_result = self.labelScript.do_task(self.task, self.workflow_api.study_id, self.workflow_api.id,
                                                   task='myWrongFormTask', field='standard_enum', value='one')
        self.assertEqual("ApiError: Unable to find a task in the workflow called 'myWrongFormTask'. ",
                         str(ctx.exception))

    def test_get_invalid_field_name(self):
        with self.assertRaises(ApiError) as ctx:
            ldap_result = self.labelScript.do_task(self.task, self.workflow_api.study_id, self.workflow_api.id,
                                                   task='myFormTask', field='made_up_enum', value='one')
        self.assertEqual("ApiError: The task 'myFormTask' has no field named 'made_up_enum'. ",
                         str(ctx.exception))
the-stack_106_30888
from collections import OrderedDict from typing import Union, Optional, List, Tuple import os import shutil from pathlib import Path import torch import torch.backends.cudnn as cudnn import torch.nn as nn from tensorboardX import SummaryWriter from torch.utils.data.dataloader import default_collate as default_collate_fn from catalyst.data.dataset import ListDataset from catalyst.dl.fp16 import Fp16Wrap from catalyst.utils.plotly import plot_tensorboard_log from ..utils.model import prepare_optimizable_params, assert_fp16_available class UtilsFactory: prepare_optimizable_params = prepare_optimizable_params assert_fp16_available = assert_fp16_available @staticmethod def create_loader( data_source, open_fn, dict_transform=None, dataset_cache_prob=-1, sampler=None, collate_fn=default_collate_fn, batch_size=32, num_workers=4, shuffle=False, drop_last=False ): dataset = ListDataset( data_source, open_fn=open_fn, dict_transform=dict_transform, cache_prob=dataset_cache_prob ) loader = torch.utils.data.DataLoader( dataset=dataset, sampler=sampler, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers, shuffle=shuffle, pin_memory=torch.cuda.is_available(), drop_last=drop_last, ) return loader @staticmethod def create_tflogger(logdir: str, name: str) -> SummaryWriter: log_dir = os.path.join(logdir, f"{name}_log") logger = SummaryWriter(log_dir) return logger @staticmethod def create_loggers( logdir: str, loaders: List[str] ) -> "OrderedDict[str, SummaryWriter]": os.makedirs(logdir, exist_ok=True) loggers = [] for key in loaders: logger = UtilsFactory.create_tflogger(logdir=logdir, name=key) loggers.append((key, logger)) loggers = OrderedDict(loggers) return loggers @staticmethod def prepare_device() -> torch.device: return torch.device("cuda" if torch.cuda.is_available() else "cpu") @staticmethod def prepare_model(model: nn.Module) -> Tuple[nn.Module, torch.device]: device = UtilsFactory.prepare_device() if torch.cuda.is_available(): cudnn.benchmark = True if torch.cuda.device_count() > 1 and not isinstance(model, Fp16Wrap): model = torch.nn.DataParallel(model).to(device) else: model = model.to(device) return model, device @staticmethod def pack_checkpoint( model=None, criterion=None, optimizer=None, scheduler=None, **kwargs ): checkpoint = kwargs if isinstance(model, OrderedDict): raise NotImplementedError() else: model_ = model if isinstance(model_, nn.DataParallel): model_ = model_.module if isinstance(model_, Fp16Wrap): model_ = model_.network checkpoint["model_state_dict"] = model_.state_dict() for dict2save, name2save in zip( [criterion, optimizer, scheduler], ["criterion", "optimizer", "scheduler"] ): if dict2save is None: continue if isinstance(dict2save, dict): for key, value in dict2save.items(): if value is not None: name2save_ = name2save + "_" + str(key) # checkpoint[name2save_] = value name2save_ = name2save_ + "_state_dict" checkpoint[name2save_] = value.state_dict() else: # checkpoint[name2save] = dict2save name2save = name2save + "_state_dict" checkpoint[name2save] = dict2save.state_dict() return checkpoint @staticmethod def unpack_checkpoint( checkpoint, model=None, criterion=None, optimizer=None, scheduler=None ): if model is not None: if isinstance(model, torch.nn.DataParallel): model = model.module if isinstance(model, Fp16Wrap): model.network.load_state_dict(checkpoint["model_state_dict"]) else: model.load_state_dict(checkpoint["model_state_dict"]) for dict2load, name2load in zip( [criterion, optimizer, scheduler], ["criterion", "optimizer", "scheduler"] ): if 
dict2load is None: continue if isinstance(dict2load, dict): for key, value in dict2load.items(): if value is not None: name2load_ = f"{name2load}_{key}_state_dict" value.load_state_dict(checkpoint[name2load_]) else: name2load = f"{name2load}_state_dict" dict2load.load_state_dict(checkpoint[name2load]) @staticmethod def save_checkpoint( logdir, checkpoint, suffix="", is_best=False, is_last=False ): os.makedirs(logdir, exist_ok=True) filename = f"{logdir}/{suffix}.pth" torch.save(checkpoint, filename) if is_best: shutil.copyfile(filename, f"{logdir}/best.pth") if is_last: shutil.copyfile(filename, f"{logdir}/last.pth") return filename @staticmethod def load_checkpoint(filepath): checkpoint = torch.load( filepath, map_location=lambda storage, loc: storage ) return checkpoint @staticmethod def plot_metrics( logdir: Union[str, Path], step: Optional[str] = "epoch", metrics: Optional[List[str]] = None, height: Optional[int] = None, width: Optional[int] = None ) -> None: """Plots your learning results. Args: logdir: the logdir that was specified during training. step: 'batch' or 'epoch' - what logs to show: for batches or for epochs metrics: list of metrics to plot. The loss should be specified as 'loss', learning rate = '_base/lr' and other metrics should be specified as names in metrics dict that was specified during training height: the height of the whole resulting plot width: the width of the whole resulting plot """ assert step in ["batch", "epoch"], \ f"Step should be either 'batch' or 'epoch', got '{step}'" metrics = metrics or ["loss"] plot_tensorboard_log(logdir, step, metrics, height, width) def get_activation_by_name(activation: str = None): if activation is None or activation == "none": activation_fn = lambda x: x elif activation == "sigmoid": activation_fn = torch.nn.Sigmoid() elif activation == "softmax2d": activation_fn = torch.nn.Softmax2d() else: raise NotImplementedError( "Activation implemented for sigmoid and softmax2d" ) return activation_fn
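# Minimal end-to-end sketch of the helpers above: build a loader from a list
# of samples, move a model to the available device, then pack and save a
# checkpoint. The model, data_source, open_fn and "./logs" directory are
# placeholders supplied by the caller.
def _utils_factory_demo(model, data_source, open_fn):
    loader = UtilsFactory.create_loader(
        data_source, open_fn=open_fn, batch_size=16, num_workers=0, shuffle=True)
    model, device = UtilsFactory.prepare_model(model)
    checkpoint = UtilsFactory.pack_checkpoint(model=model, epoch=0)
    filename = UtilsFactory.save_checkpoint("./logs", checkpoint, suffix="epoch_0")
    return loader, device, filename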
the-stack_106_30892
# this module contains all the defaults used by the generation of cleaned-up headers # for the Bionic C library # import time, os, sys from utils import * # the list of supported architectures kernel_archs = [ 'arm', 'arm64', 'x86' ] # the list of include directories that belong to the kernel # tree. used when looking for sources... kernel_dirs = [ "linux", "asm", "asm-generic", "mtd" ] # a special value that is used to indicate that a given macro is known to be # undefined during optimization kCppUndefinedMacro = "<<<undefined>>>" # this is the set of known macros we want to totally optimize out from the # final headers kernel_known_macros = { "__KERNEL__": kCppUndefinedMacro, "__KERNEL_STRICT_NAMES":"1", "__CHECKER__": kCppUndefinedMacro, "__CHECK_ENDIAN__": kCppUndefinedMacro, "CONFIG_64BIT": "__LP64__", "CONFIG_X86_32": "__i386__", "__EXPORTED_HEADERS__": "1", "__HAVE_BUILTIN_BSWAP16__": "1", "__HAVE_BUILTIN_BSWAP32__": "1", "__HAVE_BUILTIN_BSWAP64__": "1", # Use this to remove the struct __kernel_old_timeval definition. # Otherwise, there will be two struct timeval definitions when # __kernel_old_timeval is renamed to timeval. "__kernel_old_timeval": "1", } # this is the set of known kernel data structures we want to remove from # the final headers kernel_structs_to_remove = set( [ # Remove the structures since they are still the same as # timeval, itimerval. "__kernel_old_timeval", "__kernel_old_itimerval", ] ) # define to true if you want to remove all defined(CONFIG_FOO) tests # from the clean headers. testing shows that this is not strictly necessary # but just generates cleaner results kernel_remove_config_macros = True # maps an architecture to a set of default macros that would be provided by # toolchain preprocessor kernel_default_arch_macros = { "arm": {"__ARMEB__": kCppUndefinedMacro, "__ARM_EABI__": "1"}, "arm64": {}, "x86": {}, } kernel_arch_token_replacements = { "arm": {}, "arm64": {}, "x86": {}, } # Replace tokens in the output according to this mapping. kernel_token_replacements = { # The kernel usage of __unused for unused struct fields conflicts with the macro defined in <sys/cdefs.h>. "__unused": "__linux_unused", # The kernel usage of C++ keywords causes problems for C++ code so rename. "private": "__linux_private", "virtual": "__linux_virtual", # The non-64 stuff is legacy; msqid64_ds/ipc64_perm is what userspace wants. "msqid_ds": "__kernel_legacy_msqid_ds", "semid_ds": "__kernel_legacy_semid_ds", "shmid_ds": "__kernel_legacy_shmid_ds", "ipc_perm": "__kernel_legacy_ipc_perm", # The kernel semun isn't usable (https://github.com/android-ndk/ndk/issues/400). "semun": "__kernel_legacy_semun", # The kernel's _NSIG/NSIG are one less than the userspace value, so we need to move them aside. "_NSIG": "_KERNEL__NSIG", "NSIG": "_KERNEL_NSIG", # The kernel's SIGRTMIN/SIGRTMAX are absolute limits; userspace steals a few. "SIGRTMIN": "__SIGRTMIN", "SIGRTMAX": "__SIGRTMAX", # We want to support both BSD and Linux member names in struct udphdr. "udphdr": "__kernel_udphdr", # This causes problems when trying to export the headers for the ndk. "__attribute_const__": "__attribute__((__const__))", # In this case the kernel tries to keep out of our way, but we're happy to use its definition. "__kernel_sockaddr_storage": "sockaddr_storage", # The kernel started using struct __kernel_old_timeval in some places, # which is the exact same as struct timeval. Replace that name with # timeval so that kernel structures all use the same named structure. 
# If struct __kernel_old_timeval and struct timeval become different, # then a different solution needs to be implemented. "__kernel_old_timeval": "timeval", # Do the same for __kernel_old_itimerval as for timeval. "__kernel_old_itimerval": "itimerval", } # This is the set of struct definitions that we want to replace with # a #include of <bits/struct.h> instead. kernel_struct_replacements = set( [ "epoll_event", "flock", "flock64", "in_addr", "ip_mreq_source", "ip_msfilter", ] ) # This is the set of known static inline functions that we want to keep # in the final kernel headers. kernel_known_generic_statics = set( [ "ipt_get_target", # uapi/linux/netfilter_ipv4/ip_tables.h "ip6t_get_target", # uapi/linux/netfilter_ipv6/ip6_tables.h # Byte swapping inlines from uapi/linux/swab.h # The below functions are the ones we are guaranting we export. "__swab16", "__swab32", "__swab64", "__swab16p", "__swab32p", "__swab64p", "__swab16s", "__swab32s", "__swab64s", "__swahw32", "__swahb32", "__swahw32p", "__swahb32p", "__swahw32s", "__swahb32s", # These are required to support the above functions. "__fswahw32", "__fswahb32", ] ) # this is the standard disclaimer # kernel_disclaimer = """\ /**************************************************************************** **************************************************************************** *** *** This header was automatically generated from a Linux kernel header *** of the same name, to make information necessary for userspace to *** call into the kernel available to libc. It contains only constants, *** structures, and macros generated from the original header, and thus, *** contains no copyrightable information. *** *** To edit the content of this header, modify the corresponding *** source file (e.g. under external/kernel-headers/original/) then *** run bionic/libc/kernel/tools/update_all.py *** *** Any manual change here will be lost the next time this script will *** be run. You've been warned! *** **************************************************************************** ****************************************************************************/ """
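# Illustrative-only sketch of how the replacement tables above could be
# consumed by a header-cleaning pass. The real consumers live alongside
# update_all.py in bionic/libc/kernel/tools/ and may tokenize the input rather
# than do plain substring replacement.
def _apply_token_replacements(line, arch):
    for old, new in kernel_arch_token_replacements.get(arch, {}).items():
        line = line.replace(old, new)
    for old, new in kernel_token_replacements.items():
        line = line.replace(old, new)
    return line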
the-stack_106_30893
import requests
from termcolor.termcolor import colored, cprint


class httpCommands():
    """Probe a target URL with the common HTTP verbs and print the status codes."""

    def __init__(self):
        pass

    def execute_all_func(self, target):
        # Run every probe; a failure in one verb should not stop the others.
        try:
            self.get_method(target)
        except Exception:
            cprint("Error", "red")
        try:
            self.post_method(target)
        except Exception:
            cprint("Error", "red")
        try:
            self.head_method(target)
        except Exception:
            cprint("Error", "red")
        try:
            self.put_method(target)
        except Exception:
            cprint("Error", "red")
        try:
            self.delete_method(target)
        except Exception:
            cprint("Error", "red")

    def get_method(self, target):
        cprint("Testing GET Method", 'yellow')
        print("")
        req = requests.get(target)
        r = req.status_code
        if r == 200:
            print(r, "OK")
        else:
            print("Response:", r)

    def post_method(self, target):
        cprint("Testing POST Method", 'yellow')
        print("")
        req = requests.post(target)
        r = req.status_code
        if r == 200:
            print(r, "OK")
        else:
            print("Response", r)

    def head_method(self, target):
        cprint("Testing HEAD Method", 'yellow')
        print("")
        req = requests.head(target)
        r = req.status_code
        if r == 200:
            print(r, "OK")
        else:
            print("Response", r)

    def put_method(self, target):
        cprint("Testing PUT Method", 'yellow')
        print("")
        req = requests.put(target)
        r = req.status_code
        if r == 200:
            print(r, "OK")
        else:
            print("Response", r)

    def delete_method(self, target):
        cprint("Testing DELETE Method", 'yellow')
        print("")
        req = requests.delete(target)
        r = req.status_code
        if r == 200:
            print(r, "OK")
        else:
            print("Response", r)
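# Usage sketch: probe a single endpoint with every verb defined above.
# "https://example.com" is a placeholder target; any reachable URL works.
if __name__ == "__main__":
    http = httpCommands()
    http.execute_all_func("https://example.com")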