max_stars_repo_path (string, length 3 to 269) | max_stars_repo_name (string, length 4 to 119) | max_stars_count (int64, 0 to 191k) | id (string, length 1 to 7) | content (string, length 6 to 1.05M) | score (float64, 0.23 to 5.13) | int_score (int64, 0 to 5)
---|---|---|---|---|---|---|
jolly-jellyfish/src/django_meta/urls.py | Vthechamp22/summer-code-jam-2021 | 40 | 12788251 | """halfway URL Configuration"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include
urlpatterns = [
path('admin', admin.site.urls),
path('users/login', auth_views.LoginView.as_view(), name='login'),
path('users/logout', auth_views.LogoutView.as_view(next_page='/'), name='logout'),
path('', include('page_maker.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
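# The static() call above serves uploaded media during development and assumes that
# MEDIA_URL and MEDIA_ROOT are defined in settings.py. A minimal sketch of those
# settings (illustrative values, not taken from this project):
#
#   MEDIA_URL = '/media/'
#   MEDIA_ROOT = BASE_DIR / 'media'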
| 1.632813 | 2 |
cms/views.py | rodrigo-pacheco/X-Serv-15.5-Django-CMS | 0 | 12788252 | from django.shortcuts import render
from cms.models import Pages
from django.http import HttpResponse
from django.http import HttpResponseNotFound
from django.core.exceptions import ObjectDoesNotExist
# Create your views here.
def slash(self):
response = ''
for Page in Pages.objects.all():
redirection = "<a href=/" + str(Page.id) + ">" + Page.name + "</a>"
        response += (str(Page.id) + ' : ' + redirection + "\n")
return(HttpResponse(response))
def number(self, num):
try:
Page = Pages.objects.get(id=str(num))
return(HttpResponse(Page.page))
except ObjectDoesNotExist:
return(HttpResponse("Resource not in database"))
def notfound(self):
return(HttpResponseNotFound("NOT FOUND"))
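# A hypothetical urls.py wiring for these views (this project's actual URLconf is not
# shown here, so the patterns below are an illustrative assumption):
#
#   from django.urls import path
#   from cms import views
#
#   urlpatterns = [
#       path('', views.slash),
#       path('<int:num>', views.number),
#   ]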
| 2.234375 | 2 |
pytorch/detectron2_test.py | cicicici/deeptensor | 1 | 12788253 | from __future__ import print_function
import torch
''' Env
pip install -U torch torchvision
pip install -U cython
pip install -U 'git+https://github.com/facebookresearch/fvcore.git' 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'
git clone https://github.com/facebookresearch/detectron2 detectron2_repo
pip install -e detectron2_repo
'''
# Setup detectron2 logger
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import cv2
import random
# import some common detectron2 utilities
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
im = cv2.imread("./input.jpg")
cfg = get_cfg()
cfg.merge_from_file("../../detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
# Find a model from detectron2's model zoo. You can either use the https://dl.fbaipublicfiles.... url, or use the following shorthand
cfg.MODEL.WEIGHTS = "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl"
predictor = DefaultPredictor(cfg)
outputs = predictor(im)
print(outputs["instances"].pred_classes)
print(outputs["instances"].pred_boxes)
v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
#v.get_image()
v.save("output.jpg")
| 2.3125 | 2 |
sensor_adapter.py | lgm1989/rmep | 2 | 12788254 | from robot_base import RobotBase
class SensorAdapter(RobotBase):
"""
    3.2.10 Sensor adapter board control
https://robomaster-dev.readthedocs.io/zh_CN/latest/sdk/protocol_api.html#id43
"""
def get_adc(self, id, port):
"""
        3.2.10.1. Read the ADC value from the sensor adapter board
"""
cmd = "sensor_adapter adc id %d port %d;" % (id, port)
return self.ctrl_and_reveive(cmd)
    def get_io_level(self, id, port):
        """
        3.2.10.2. Read the IO level from the sensor adapter board
"""
cmd = "sensor_adapter io_level id %d port %d;" % (id, port)
return self.ctrl_and_reveive(cmd)
    def get_pulse_period(self, id, port):
        """
        3.2.10.3. Read the IO pin level-transition (pulse period) time from the sensor adapter board
"""
cmd = "sensor_adapter pulse_period id %d port %d;" % (id, port)
return self.ctrl_and_reveive(cmd)
def event_io_level(self, switch):
"""
        3.2.10.4. Control event reporting from the sensor adapter board
"""
cmd = "sensor_adapter event io_level %s;" % switch
return self.ctrl_and_reveive(cmd)
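# A minimal usage sketch (illustrative only: RobotBase's constructor and the underlying
# plaintext-SDK connection handling are not shown in this file, so the setup is assumed):
#
#   adapter = SensorAdapter()
#   print(adapter.get_adc(id=1, port=1))        # read ADC value from board 1, port 1
#   print(adapter.get_io_level(id=1, port=2))   # read IO level
#   adapter.event_io_level("on")                # enable IO-level event reporting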
| 2.828125 | 3 |
FLASH4.2.1_save/tools/python/flmake/mv.py | mtsafarzadeh/FLASHverES | 1 | 12788255 | <filename>FLASH4.2.1_save/tools/python/flmake/mv.py
import os
import json
import shutil
# Relative imports needed!
from .. import FLASH_SRC_DIR
from . import logger
from .setup_globals import gvars
USAGE = ("Moves a flash run local sub-directory\n"
"from src to dst. Useful for managing\n"
"many runs.\n\n"
"usage: flmake mv <src> <dst>")
def main(ns, rc):
"""Moves src run dir to dst dir."""
gvars.init(FLASH_SRC_DIR)
# grab id, if possible
id = None
desc_filename = os.path.join(ns.src, gvars.desc_filename)
if os.path.exists(desc_filename):
with open(desc_filename) as desc_file:
desc = json.load(desc_file)
if 'run' in desc:
id = desc['run']['id']
# move the dir
shutil.move(ns.src, ns.dst)
# Log the move
msg = ns.message
if msg is None:
msg = "moved {0} -> {1}".format(ns.src, ns.dst)
logger.info(msg, "mv", id, ns.dst)
| 2.515625 | 3 |
mim/Psi4.py | nbraunsc/MIM | 0 | 12788256 | <filename>mim/Psi4.py<gh_stars>0
import numpy as np
import psi4  # required by every psi4.* call in the methods below
#psi4.core.set_output_file('output.dat', False)
class Psi4():
""" Psi4 Numpy quantum chemistry backend class
An instance of this class is passed into the Fragment class
"""
def __init__(self, theory=None, basis=None, tol=None, active_space=None, nelec=None, nelec_alpha=None, nelec_beta=None, max_memory=None, xc=None, charge=0, spin=0):
self.theory = theory
self.basis = basis
self.spin = spin
self.tol = tol
self.active_space = active_space #number of orbitals in active space
self.nelec = nelec #number of electrons in the active space
self.nelec_alpha = nelec_alpha
self.nelec_beta = nelec_beta
self.max_memory = max_memory
self.xc = xc
self.charge = charge
def energy_gradient(self, input_xyz):
string = "noreorient \n" + str(input_xyz).replace("[", " ").replace(",", "").replace("]", "\n").replace("'", "")
mol = psi4.geometry(string)
options = {'BASIS': self.basis}
psi4.set_options(options)
mol.set_molecular_charge(self.charge)
mol.set_multiplicity(self.spin)
#psi4.set_memory('100 GB')
e = 0
g = 0
h = 0
if self.theory == 'RHF':
e, wfn = psi4.energy('scf', return_wfn=True)
g = np.array(psi4.gradient('scf'))
h = np.array(psi4.hessian('scf'))
if self.theory == 'UHF':
psi4.set_options({'reference': 'uhf', 'basis': self.basis})
e, wfn = psi4.energy('scf', return_wfn=True)
g = np.array(psi4.gradient('scf'))
h = 0
if self.theory == 'ROHF':
psi4.set_options({'reference': 'rohf', 'basis': self.basis})
e, wfn = psi4.energy('scf', return_wfn=True)
g = np.array(psi4.gradient('scf'))
h = 0
if self.theory == 'MP2':
e, wfn = psi4.energy('MP2', return_wfn=True)
g = np.array(psi4.gradient('MP2'))
h = 0
#h = np.array(psi4.hessian('MP2'))
if self.theory == 'CCSD':
e = psi4.energy('CCSD')
g = np.array(psi4.gradient('CCSD'))
h = np.array(psi4.hessian('CCSD'))
if self.theory == 'CCSD(T)':
e = psi4.energy('CCSD(T)')
g = np.array(psi4.gradient('CCSD(T)'))
h = np.array(psi4.hessian('CCSD(T)'))
if self.theory == 'CISD':
e = psi4.energy('CISD')
g = np.array(psi4.gradient('CISD'))
h = np.array(psi4.hessian('CISD'))
if self.theory == 'CISDT':
e = psi4.energy('CISDT')
g = np.array(psi4.gradient('CISDT'))
h = np.array(psi4.hessian('CISDT'))
return e, g, h
def apply_field(self, E, input_xyz, com, origin, direction):
string = "symmetry C1\n noreorient\n no_com \n" + str(input_xyz).replace("[", " ").replace(",", "").replace("]", "\n").replace("'", "")
mol = psi4.geometry(string)
options = {
'basis' : self.basis,
'PERTURB_H' : True,
'PERTURB_WITH' : 'DIPOLE',
'PERTURB_DIPOLE' : E,
'save_jk' : True
}
psi4.set_options(options)
mol.set_molecular_charge(self.charge)
#setting everything to zero
e = 0
g = 0
dip = 0
nuc_dip = 0
g_nuc = 0
g_elec = 0
if self.theory == 'RHF':
e, scf_wfn = psi4.energy('scf', return_wfn=True)
densitya = psi4.core.Matrix.to_array(scf_wfn.Da())
densityb = psi4.core.Matrix.to_array(scf_wfn.Db())
focka = psi4.core.Matrix.to_array(scf_wfn.Fa())
fockb = psi4.core.Matrix.to_array(scf_wfn.Fb())
hcore = psi4.core.Matrix.to_array(scf_wfn.H()) #core hamiltonian
#print(hcore)
mints = psi4.core.MintsHelper(scf_wfn.basisset())
T = np.asarray(mints.ao_kinetic())
V = np.asarray(mints.ao_potential())
S = np.asarray(mints.ao_overlap())
#print(focka + fockb)
g = np.array(psi4.gradient('scf'))*1.88973 #H/B -> H/A
#print(e)
#print(g)
if self.theory == 'UHF':
psi4.set_options({'reference': 'uhf', 'basis': self.basis})
e, scf_wfn = psi4.energy('scf', return_wfn=True)
g = np.array(psi4.gradient('scf'))*1.88973 #H/B -> H/A
if self.theory == 'ROHF':
psi4.set_options({'reference': 'rohf', 'basis': self.basis})
e, wfn = psi4.energy('scf', return_wfn=True)
g = np.array(psi4.gradient('scf'))*1.88973
if self.theory == 'MP2':
e = psi4.energy('MP2')
g = np.array(psi4.gradient('MP2'))*1.88973 #H/B -> H/A
if self.theory == 'CCSD':
e = psi4.energy('CCSD')
g = np.array(psi4.gradient('CCSD'))*1.88973 #H/B -> H/A
if self.theory == 'CCSD(T)':
e = psi4.energy('CCSD(T)')
g = np.array(psi4.gradient('CCSD(T)'))*1.88973 #H/B -> H/A
if self.theory == 'CISD':
e = psi4.energy('CISD')
g = np.array(psi4.gradient('CISD'))*1.88973 #H/B -> H/A
if self.theory == 'CISDT':
e = psi4.energy('CISDT')
g = np.array(psi4.gradient('CISDT'))*1.88973 #H/B -> H/A
#nuc_dip = np.zeros((3))
nuc_dip = np.array(mol.nuclear_dipole())
#compute nuclear gradient
Gradient = {}
Gradient["N"] = psi4.core.Matrix.to_array(mol.nuclear_repulsion_energy_deriv1([0, 0, 0]))
N_grad = psi4.core.Matrix.from_array(Gradient["N"])
N_grad.name = "NUCLEAR GRADIENT"
N_grad.print_out()
g_nuc = np.array(N_grad)*1.88973 #H/B
g_elec = 0
print("nuclear repo energy:", mol.nuclear_repulsion_energy())
return e, g, dip, nuc_dip, g_nuc, g_elec
def get_dipole(self, coords_new):
""" This only for RHF and is used when building the APT's from numerical diff w.r.t
atomic coordinates.
There are no CCSD(T) dipole moments implemented in Psi4.
"""
string = "noreorient \n" + str(coords_new).replace("[", " ").replace(",", "").replace("]", "\n").replace("'", "")
mol = psi4.geometry(string)
options = {'BASIS': self.basis}
psi4.set_options(options)
mol.set_molecular_charge(self.charge)
#psi4.set_memory('100 GB')
#calculate nuclear dip moment
nuc_dip = np.array(mol.nuclear_dipole())
dipole = np.zeros((3)) #Debye
method = self.theory
if self.theory == 'RHF':
method = 'SCF'
psi4.prop('scf', properties=["DIPOLE"])
if self.theory == 'UHF': #unsure if this one works
method = 'SCF'
psi4.set_options({'reference': 'uhf', 'basis': self.basis})
psi4.prop('scf', properties=["DIPOLE"])
if self.theory == 'ROHF':
method = 'SCF'
psi4.set_options({'reference': 'rohf', 'basis': self.basis})
psi4.prop('scf', properties=["DIPOLE"])
if self.theory == 'MP2':
psi4.prop('mp2', properties=["DIPOLE"])
if self.theory == 'CCSD':
method = 'CC'
e, scf_wfn = psi4.energy('scf', return_wfn=True)
psi4.prop('ccsd', properties=["DIPOLE"], ref_wfn=scf_wfn)
#psi4.prop('ccsd', properties=["DIPOLE"], return_wfn=False)
if self.theory == 'CISD':
method = 'CI'
psi4.prop('cisd', properties=["DIPOLE"])
if self.theory == 'CISDT':
method = 'CI'
psi4.prop('cisdt', properties=["DIPOLE"])
#if self.theory == 'CC2':
# psi4.prop('cc2', properties=["DIPOLE"])
dipole[0] = psi4.get_variable(method + " DIPOLE X")
dipole[1] = psi4.get_variable(method + " DIPOLE Y")
dipole[2] = psi4.get_variable(method + " DIPOLE Z")
return dipole, nuc_dip
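# A minimal usage sketch (assumes a working psi4 installation; the nested-list geometry
# format simply mirrors the string replacements done in energy_gradient above, and the
# coordinates are illustrative):
#
#   backend = Psi4(theory='RHF', basis='sto-3g')
#   water = [['O', 0.0, 0.0, 0.0], ['H', 0.0, 0.757, 0.587], ['H', 0.0, -0.757, 0.587]]
#   e, g, h = backend.energy_gradient(water)
#   dip, nuc_dip = backend.get_dipole(water)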
| 2.703125 | 3 |
catalog/migrations/0005_auto_20210406_1240.py | Dessand/local_library | 0 | 12788257 | # Generated by Django 3.1.7 on 2021-04-06 12:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalog', '0004_auto_20210405_1042'),
]
operations = [
migrations.AlterField(
model_name='book',
name='title',
field=models.CharField(max_length=300),
),
]
| 1.539063 | 2 |
exprimo/graph.py | Lagostra/exprimo | 3 | 12788258 | <gh_stars>1-10
"""
Graph representation of a Deep Neural Network.
This code is based off of corresponding code in the Paleo library. See license file in submodule.
Changes are made in order to support specific device assignment of layers.
"""
import json
from copy import deepcopy
import collections.abc
from paleo import layers
from paleo.graph import GraphWalker
class LayerSpec:
def __init__(self, name, params):
self.name = name
self.params = dict(params)
self.operation = None
self.parents = []
self.inbounds = []
self.outbounds = []
def attach_op(self, operation):
self.operation = operation
def __repr__(self):
return self.name
def __hash__(self):
return hash(self.name)
def __getitem__(self, key):
return self.params[key]
def get(self, key, default):
return self.params.get(key, default)
class ComputationGraph:
def __init__(self, path=None, attach_ops=True, force_device=None):
self.nested_list = None
self.topological_order = None
self.attach_ops = attach_ops
self.force_device = force_device
if path:
self.load(path)
def load(self, path):
with open(path) as f:
net = json.load(f)
self._build(net)
def load_from_string(self, string):
net = json.loads(string)
self._build(net)
def _create_topological_order(self, names_to_specs):
incoming = {}
S = []
self.topological_order = []
for name, spec in names_to_specs.items():
n_incoming = len(spec.inbounds)
incoming[spec] = n_incoming
if n_incoming == 0:
S.append(spec)
while len(S):
node = S.pop()
self.topological_order.append(node)
for n in node.outbounds:
incoming[n] -= 1
if incoming[n] == 0:
S.append(n)
def _attach_layer_op(self):
names_to_specs = dict()
for layer_spec in self.topological_order:
if len(layer_spec['parents']) == 1:
parent_name = layer_spec['parents'][0]
inputs = names_to_specs[parent_name].operation.outputs
else:
inputs = []
try:
for parent_name in layer_spec['parents']:
inputs.append(names_to_specs[parent_name].operation.outputs)
except KeyError:
raise KeyError(f'Cannot find parent {parent_name} of {layer_spec.name}')
try:
layer = None
if layer_spec['type'] == 'Input':
layer = layers.Input(layer_spec.name, layer_spec['tensor'])
elif layer_spec['type'] == 'Convolution':
layer = layers.Conv2d(
layer_spec.name,
inputs,
layer_spec['filter'],
layer_spec['strides'],
layer_spec['padding'],
backprop=('data' not in layer_spec['parents']),
activation_fn=layer_spec.get('activation_fn', 'relu'),
splits=layer_spec.get('splits', None)
)
elif layer_spec['type'] == 'Deconvolution':
layer = layers.Deconv2D(
layer_spec.name,
inputs,
layer_spec['filter'],
layer_spec['strides'],
layer_spec['padding'],
layer_spec['output_shape'],
backprop=('data' not in layer_spec['parents']),
activation_fn=layer_spec.get('activation_fn', 'relu'))
elif layer_spec['type'] == 'Pooling':
layer = layers.Pool2d(
layer_spec.name,
inputs,
layer_spec['ksize'],
layer_spec['strides'],
layer_spec['padding'],
pool_type='max')
elif layer_spec['type'] == 'UpSampling2D':
layer = layers.UpSampling2D(layer_spec.name, inputs,
layer_spec['ksize'])
elif layer_spec['type'] == 'AvgPool':
layer = layers.Pool2d(
layer_spec.name,
inputs,
layer_spec['ksize'],
layer_spec['strides'],
layer_spec['padding'],
pool_type='avg')
elif layer_spec['type'] == 'Dropout':
layer = layers.Dropout(layer_spec.name, inputs,
layer_spec['dropout_keep_prob'])
elif layer_spec['type'] == 'Concatenate':
layer = layers.Concatenate(layer_spec.name, inputs,
layer_spec['dim'])
elif layer_spec['type'] == 'Reshape':
layer = layers.Reshape(layer_spec.name, inputs,
layer_spec['output_shape'])
elif layer_spec['type'] == 'Elementwise':
layer = layers.Elementwise(layer_spec.name, inputs)
elif layer_spec['type'] == 'Softmax':
layer = layers.Softmax(layer_spec.name, inputs,
layer_spec.get('num_classes', None))
elif layer_spec['type'] == 'Sigmoid':
layer = layers.Sigmoid(layer_spec.name, inputs)
elif layer_spec['type'] == 'InnerProduct':
layer = layers.InnerProduct(layer_spec.name, inputs,
layer_spec['num_outputs'])
else:
layer = layers.Generic(layer_spec.name, inputs,
layer_spec['type'])
except Exception as e:
raise e
if layer:
layer_spec.parents.extend([names_to_specs[p] for p in layer_spec['parents']])
layer.parents = layer_spec['parents']
layer_spec.attach_op(layer)
names_to_specs[layer_spec.name] = layer_spec
def _build(self, net):
names_to_specs = dict()
block_endpoints = dict()
def _parents(parents):
# Replace with endpoint if parent is a block.
transformed_parents = []
for parent_name in parents:
transformed_parents.append(block_endpoints.get(parent_name, parent_name))
return transformed_parents
sharded_layers = {}
def _shard(layer_spec, endpoint_block=None):
devices = layer_spec.params['device']
            assert isinstance(devices, collections.abc.Sequence), 'devices must be a Sequence for sharding to be allowed!'
dim_vector_name = None
if layer_spec.params['type'] == 'Convolution':
dim_vector_name = 'filter'
# elif layer_spec.params['type'] == 'Pooling':
# dim_vector_name = 'ksize'
else:
layer_spec.params['device'] = layer_spec.params['device'][0]
names_to_specs[layer_spec.name] = layer_spec
return
channel_sizes = [layer_spec.params[dim_vector_name][-1] // len(devices)] * len(devices)
i = 0
while sum(channel_sizes) < layer_spec.params[dim_vector_name][-1]:
channel_sizes[i] += 1
i += 1
shard_names = []
for i, device in enumerate(devices):
shard_params = deepcopy(layer_spec.params)
shard_name = f'{layer_spec.name}_shard{i}'
shard_params['device'] = device
shard_params[dim_vector_name][-1] = channel_sizes[i]
shard_names.append(shard_name)
shard_spec = LayerSpec(shard_name, shard_params)
assert shard_name not in names_to_specs, f'Duplicate {shard_name}.'
names_to_specs[shard_name] = shard_spec
sharded_layers[layer_name] = shard_names
if endpoint_block:
sharded_layers[endpoint_block] = shard_names
# Transform all specs into LayerSpec objects
for layer_name, layer_params in net['layers'].items():
if layer_params.get('type', None) in ['Block']:
block_name = layer_name
block_parents = _parents(layer_params['parents'])
# If block provides an endpoint, subsequent layers can refer to the block name as parent.
if 'endpoint' in layer_params:
block_endpoints[block_name] = f'{block_name}/{layer_params["endpoint"]}'
for sublayer_name, sublayer_params in layer_params['layers'].items():
is_endpoint = 'endpoint' in layer_params and layer_params['endpoint'] == sublayer_name
sublayer_name = f'{block_name}/{sublayer_name}'
if 'device' not in sublayer_params and 'device' in layer_params:
sublayer_params['device'] = layer_params['device']
sublayer = LayerSpec(sublayer_name, sublayer_params)
# Update parents
if len(sublayer_params['parents']) == 0:
# Use the parent of the block
sublayer_parents = block_parents
else:
# Add blockname to the parent names
sublayer_parents = [f'{block_name}/{n}' for n in sublayer_params['parents']]
sublayer_parents = _parents(sublayer_parents)
sublayer.params['parents'] = sublayer_parents
                    if 'device' in sublayer.params and isinstance(sublayer.params['device'], collections.abc.Sequence):
endpoint_block = layer_name if is_endpoint else None
_shard(sublayer, endpoint_block)
else:
assert sublayer_name not in names_to_specs, f'Duplicate {sublayer_name}.'
names_to_specs[sublayer_name] = sublayer
else:
layer_params['parents'] = _parents(layer_params['parents'])
layer = LayerSpec(layer_name, layer_params)
                if 'device' in layer.params and isinstance(layer.params['device'], collections.abc.Sequence):
_shard(layer)
else:
assert layer_name not in names_to_specs, f'Duplicate {layer_name}'
names_to_specs[layer_name] = layer
# Update parents list for children of sharded layers
for layer_name, layer_spec in names_to_specs.copy().items():
new_parents = []
for parent in layer_spec['parents']:
if parent in sharded_layers:
conc_layer_params = {
'type': 'Concatenate',
'parents': sharded_layers[parent],
'device': layer_spec.params['device'],
'dim': 3
}
conc_layer_name = f'{layer_name}_conc_{parent}'
conc_layer_spec = LayerSpec(conc_layer_name, conc_layer_params)
names_to_specs[conc_layer_name] = conc_layer_spec
new_parents.append(conc_layer_name)
else:
new_parents.append(parent)
layer_spec.params['parents'] = new_parents
# Add edges
for layer_name, layer_spec in names_to_specs.items():
for parent_name in _parents(layer_spec['parents']):
assert parent_name in names_to_specs, f'Parent layer {parent_name} of {layer_name} ' \
f'does not have a LayerSpec object.'
names_to_specs[parent_name].outbounds.append(layer_spec)
layer_spec.inbounds.append(names_to_specs[parent_name])
# Set default devices for any layer with unspecified device
for layer_name, layer_spec in names_to_specs.items():
if 'device' not in layer_spec.params:
layer_spec.params['device'] = 0
if self.force_device is not None:
layer_spec.params['device'] = self.force_device
self._create_topological_order(names_to_specs)
if self.attach_ops:
self._attach_layer_op()
def get_number_of_jumps(self, return_max_jumps=False):
"""
Returns the number of jumps - i.e. the number of pairs of consecutive operations that are placed on different
devices.
:param return_max_jumps: If True, the maximum possible number of jumps is also returned.
"""
num_jumps = 0
max_jumps = 0
for layer in self.topological_order:
for parent in layer.parents:
max_jumps += 1
if layer['device'] != parent['device']:
num_jumps += 1
if return_max_jumps:
return num_jumps, max_jumps
return num_jumps
def get_flattened_layer_names(net_string):
graph = ComputationGraph()
graph.load_from_string(net_string)
return map(lambda x: x.name, graph.topological_order)
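# The sketch below is illustrative only: it builds a tiny network description in the
# JSON layout that _build() expects (field names are taken from the parsing code above),
# with a two-element 'device' list on the conv layer to trigger channel sharding.
# attach_ops=False keeps the example from invoking Paleo's layer implementations.
if __name__ == '__main__':
    example_net = """
    {
        "layers": {
            "data": {"type": "Input", "parents": [], "tensor": [1, 32, 32, 3], "device": 0},
            "conv1": {"type": "Convolution", "parents": ["data"], "filter": [3, 3, 3, 16],
                      "strides": [1, 1, 1, 1], "padding": "SAME", "device": [0, 1]},
            "pool1": {"type": "Pooling", "parents": ["conv1"], "ksize": [1, 2, 2, 1],
                      "strides": [1, 2, 2, 1], "padding": "SAME", "device": 0}
        }
    }
    """
    graph = ComputationGraph(attach_ops=False)
    graph.load_from_string(example_net)
    # Expected order: data, the two conv1 shards, a generated concat layer, then pool1.
    print([spec.name for spec in graph.topological_order])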
| 2.546875 | 3 |
EDU-CF-R102/B.py | patwadeepak/codeforces | 0 | 12788259 | <filename>EDU-CF-R102/B.py<gh_stars>0
import math
def LCM(s, t):
n = len(s)
m = len(t)
s_count = {'a': 0, 'b': 0}
t_count = {'a': 0, 'b': 0}
for char in s:
s_count[char] += 1
for char in t:
t_count[char] += 1
    if s_count['a'] * t_count['b'] == t_count['a'] * s_count['b']:
if n > m:
gcd = math.gcd(t_count['a'], t_count['b'])
pattern_length = t_count['a']/gcd + t_count['b']/gcd
else:
gcd = math.gcd(s_count['a'], s_count['b'])
pattern_length = s_count['a']/gcd + s_count['b']/gcd
pattern_length = int(pattern_length)
a = n//pattern_length
b = m//pattern_length
        if s[:pattern_length]*a == s and s[:pattern_length]*b == t:
lcm = int(a*b/math.gcd(a,b))
return s[:pattern_length]*lcm
else:
return -1
else:
return -1
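# Quick worked examples (illustrative, independent of the judge I/O handled below):
#   LCM("baba", "ba") -> "baba"     (common repeating unit "ba"; lcm of lengths 4 and 2)
#   LCM("aa", "aaa")  -> "aaaaaa"   (common unit "a"; lcm of lengths 2 and 3 is 6)
#   LCM("aba", "ab")  -> -1         (letter ratios differ, so no common unit exists)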
if __name__ == "__main__":
t = int(input())
while t:
p = input()
q = input()
print(LCM(p, q))
t -= 1 | 3.46875 | 3 |
apps/modules/theme_setting/process/nav_setting.py | Bension/osroom | 579 | 12788260 | <gh_stars>100-1000
#!/usr/bin/env python
# -*-coding:utf-8-*-
# @Time : 2019/12/2 14:43
# @Author : <NAME>
from bson import ObjectId
from flask import request, g
from flask_babel import gettext
from apps.app import mdbs, cache
from apps.core.flask.reqparse import arg_verify
from apps.utils.format.obj_format import json_to_pyseq, objid_to_str, str_to_num
@cache.cached(timeout=86400, key_base64=False, db_type="redis")
def get_global_theme_navs(theme_name, lang):
langs = g.site_global["language"]["all_language"].keys()
navs = mdbs["sys"].dbs["theme_nav_setting"].find(
{
"language": lang,
"theme_name": theme_name
},
{"_id": 0}
).sort([("order", 1)])
if navs.count(True):
return list(navs)
else:
for la in langs:
if la == lang:
continue
navs = mdbs["sys"].dbs["theme_nav_setting"].find(
{
"language": la,
"theme_name": theme_name
},
{"_id": 0}
).sort([("order", 1)])
if navs.count(True):
return list(navs)
return []
def get_navs():
theme_name = request.argget.all("theme_name")
lang = request.argget.all("language")
s, r = arg_verify(
[
(gettext("theme name"), theme_name),
(gettext("language"), lang)
],
required=True
)
if not s:
return r
navs = mdbs["sys"].dbs["theme_nav_setting"].find(
{"language": lang, "theme_name": theme_name}
).sort([("order", 1)])
navs = objid_to_str(navs)
data = {
"navs": navs
}
return data
def nav_setting():
"""
Update
:RETURN:
"""
cid = request.argget.all("id")
theme_name = request.argget.all("theme_name")
lang = request.argget.all("language")
display_name = request.argget.all("display_name")
order = str_to_num(request.argget.all("order", 99))
json_data = json_to_pyseq(request.argget.all("json_data"))
s, r = arg_verify(
[(gettext("Display name"), display_name),
(gettext("theme name"), theme_name),
(gettext("language"), lang),
(gettext("Json data"), json_data)
],
required=True
)
if not s:
return r
if not isinstance(json_data, dict):
data = {
"msg": gettext('Value must be of type json'),
"msg_type": "e",
"custom_status": 400
}
return data
if not cid:
updata = {
'theme_name': theme_name,
'display_name': display_name,
'language': lang,
'json_data': json_data,
"order": order
}
r = mdbs["sys"].dbs["theme_nav_setting"].insert_one(updata)
if r.inserted_id:
data = {
"msg": gettext("Navigation added successfully"),
"msg_type": "s",
"custom_status": 200
}
else:
data = {
"msg": gettext("Failed to add navigation"),
"msg_type": "w",
"custom_status": 400
}
else:
updata = {
'theme_name': theme_name,
'display_name': display_name,
'language': lang,
'json_data': json_data,
"order": order
}
r = mdbs["sys"].dbs["theme_nav_setting"].update_one(
{"_id": ObjectId(cid)},
{"$set": updata}
)
if r.modified_count:
data = {
"msg": gettext("Updated successfully"),
"msg_type": "s",
"custom_status": 200
}
elif r.matched_count:
data = {
"msg": gettext("Unmodified"),
"msg_type": "w",
"custom_status": 200
}
else:
data = {
"msg": gettext("Update failed"),
"msg_type": "w",
"custom_status": 400
}
cache.delete_autokey(
fun="get_global_theme_navs",
theme_name=".*",
lang=".*",
db_type="redis",
key_regex=True
)
return data
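# Illustrative request parameters for nav_setting() above (names follow the
# request.argget.all() calls; the json_data keys are an assumption, since the
# navigation schema itself is theme-specific):
#
#   id=            theme_name=ostheme   language=en
#   display_name=Home   order=1
#   json_data={"url": "/", "target": "_self"}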
def del_navs():
ids = json_to_pyseq(request.argget.all("ids"))
s, r = arg_verify(
[(gettext("ids"), ids)],
required=True
)
if not s:
return r
del_ids = []
for id in ids:
del_ids.append(ObjectId(id))
r = mdbs["sys"].dbs["theme_nav_setting"].delete_many({"_id": {"$in": del_ids}})
if r.deleted_count:
data = {
"msg": gettext("Deleted successfully"),
"msg_type": "s",
"custom_status": 200
}
else:
data = {
"msg": gettext("Delete failed"),
"msg_type": "s",
"custom_status": 200
}
cache.delete_autokey(
fun="get_global_theme_navs",
theme_name=".*",
lang=".*",
db_type="redis",
key_regex=True
)
return data
| 2.28125 | 2 |
iqmon/scripts/ingest_script.py | joshwalawender/IQMon | 9 | 12788261 | from keckdrpframework.core.framework import Framework
from keckdrpframework.config.framework_config import ConfigClass
from keckdrpframework.models.arguments import Arguments
from keckdrpframework.utils.drpf_logger import getLogger
import subprocess
import time
import argparse
import sys
import traceback
import pkg_resources
import logging.config
from pathlib import Path
from datetime import datetime
from glob import glob
# the preferred way to import the pipeline is a direct import
from iqmon.pipelines.ingest import IngestPipeline
def _parseArguments(in_args):
description = "Ingest pipeline CLI"
# this is a simple case where we provide a frame and a configuration file
parser = argparse.ArgumentParser(prog=f"{in_args[0]}", description=description)
parser.add_argument('-c', dest="config_file", type=str, help="Configuration file")
parser.add_argument('-frames', nargs='*', type=str, help='input image file (full path, list ok)', default=None)
# in this case, we are loading an entire directory, and ingesting all the files in that directory
parser.add_argument('-infiles', dest="infiles", help="Input files", nargs="*")
parser.add_argument('-d', '--directory', dest="dirname", type=str, help="Input directory", nargs='?', default=None)
# after ingesting the files, do we want to continue monitoring the directory?
parser.add_argument('-m', '--monitor', dest="monitor", action='store_true', default=False)
# special arguments, ignore
parser.add_argument("-i", "--ingest_data_only", dest="ingest_data_only", action="store_true",
help="Ingest data and terminate")
parser.add_argument("-w", "--wait_for_event", dest="wait_for_event", action="store_true", help="Wait for events")
parser.add_argument("-W", "--continue", dest="continuous", action="store_true",
help="Continue processing, wait for ever")
parser.add_argument("-s", "--start_queue_manager_only", dest="queue_manager_only", action="store_true",
help="Starts queue manager only, no processing",
)
args = parser.parse_args(in_args[1:])
return args
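# Typical command line handled by the parser above (illustrative; the console-script
# entry point and the config/data paths depend on how the iqmon package is installed):
#
#   python ingest_script.py -c pipeline.cfg -d /data/incoming -m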
##-----------------------------------------------------------------------------
## Setup Framework
##-----------------------------------------------------------------------------
def setup_framework(args, pipeline=IngestPipeline):
# START HANDLING OF CONFIGURATION FILES ##########
pkg = 'iqmon'
framework_config_file = "configs/framework.cfg"
framework_config_fullpath = pkg_resources.resource_filename(pkg, framework_config_file)
framework_logcfg_file = 'configs/logger_ingest.cfg'
framework_logcfg_fullpath = pkg_resources.resource_filename(pkg, framework_logcfg_file)
# add PIPELINE specific config files
if args.config_file is None:
pipeline_config_file = 'configs/pipeline.cfg'
pipeline_config_fullpath = pkg_resources.resource_filename(pkg, pipeline_config_file)
pipeline_config = ConfigClass(pipeline_config_fullpath, default_section='DEFAULT')
else:
pipeline_config = ConfigClass(args.pipeline_config_file, default_section='DEFAULT')
# END HANDLING OF CONFIGURATION FILES ##########
try:
framework = Framework(IngestPipeline, framework_config_fullpath)
logging.config.fileConfig(framework_logcfg_fullpath)
framework.config.instrument = pipeline_config
except Exception as e:
print("Failed to initialize framework, exiting ...", e)
traceback.print_exc()
sys.exit(1)
# this part defines a specific logger for the pipeline, so that we can
# separate the output of the pipeline from the output of the framework
framework.context.pipeline_logger = getLogger(framework_logcfg_fullpath, name="pipeline")
framework.logger = getLogger(framework_logcfg_fullpath, name="DRPF")
framework.logger.info("Framework initialized")
return framework
##-----------------------------------------------------------------------------
## Analyze One File
##-----------------------------------------------------------------------------
def analyze_one():
args = _parseArguments(sys.argv)
p = Path(args.input).expanduser().absolute()
if p.exists() is False:
print(f'Unable to find file: {p}')
return
args.name = f"{p}"
pkg = 'iqmon'
framework_config_file = "configs/framework.cfg"
framework_config_fullpath = pkg_resources.resource_filename(pkg, framework_config_file)
cfg = ConfigClass(framework_config_fullpath)
queue = queues.get_event_queue(cfg.queue_manager_hostname,
cfg.queue_manager_portnr,
cfg.queue_manager_auth_code)
if queue is None:
print("Failed to connect to Queue Manager")
return
if args.overwrite is True:
pending = queue.get_pending()
event = Event("set_overwrite", args)
queue.put(event)
pending = queue.get_pending()
event = Event("next_file", args)
queue.put(event)
##-----------------------------------------------------------------------------
## Watch Directory
##-----------------------------------------------------------------------------
def watch_directory():
args = _parseArguments(sys.argv)
framework = setup_framework(args, pipeline=IngestPipeline)
now = datetime.utcnow()
data_path = framework.config.instrument.get('FileHandling', 'ingest_dir')
data_path = data_path.replace('YYYY', f'{now.year:4d}')
data_path = data_path.replace('MM', f'{now.month:02d}')
data_path = data_path.replace('DD', f'{now.day:02d}')
framework.logger.info(f'Setting data path: {data_path}')
data_path = Path(data_path).expanduser()
if data_path.exists() is False:
data_path.mkdir(parents=True, exist_ok=True)
framework.logger.info(f'Ingesting files from {data_path}')
infiles = data_path.glob(framework.config['DEFAULT']['file_type'])
framework.ingest_data(str(data_path), infiles, True)
framework.start(False, False, False, True)
##-----------------------------------------------------------------------------
## Change Watched Directory
##-----------------------------------------------------------------------------
def change_directory():
args = _parseArguments(sys.argv)
    if args.input != '':
newdir = Path(args.input).expanduser().absolute()
else:
now = datetime.utcnow()
data_path = framework.config.instrument.get('FileHandling', 'ingest_dir')
data_path = data_path.replace('YYYY', f'{now.year:4d}')
data_path = data_path.replace('MM', f'{now.month:02d}')
data_path = data_path.replace('DD', f'{now.day:02d}')
newdir = Path(data_path).expanduser()
args.input = str(newdir)
if newdir.exists() is False:
newdir.mkdir(parents=True)
pkg = 'iqmon'
framework_config_file = "configs/framework.cfg"
framework_config_fullpath = pkg_resources.resource_filename(pkg, framework_config_file)
cfg = ConfigClass(framework_config_fullpath)
queue = queues.get_event_queue(cfg.queue_manager_hostname,
cfg.queue_manager_portnr,
cfg.queue_manager_auth_code)
if queue is None:
print("Failed to connect to Queue Manager")
else:
pending = queue.get_pending()
event = Event("set_file_type", args)
queue.put(event)
event = Event("update_directory", args)
queue.put(event)
##-----------------------------------------------------------------------------
## List Queue
##-----------------------------------------------------------------------------
def list_queue():
args = _parseArguments(sys.argv)
pkg = 'iqmon'
framework_config_file = "configs/framework.cfg"
framework_config_fullpath = pkg_resources.resource_filename(pkg, framework_config_file)
cfg = ConfigClass(framework_config_fullpath)
drpif = FrameworkInterface(cfg)
# Print pending Events
if drpif.is_queue_ok():
events = drpif.pending_events()
print(f'Found {len(events)} in queue')
if args.verbose is True:
for event in events:
print(event)
else:
print ("Pending events: Queue not available", drpif.queue)
##-----------------------------------------------------------------------------
## Clear Queue
##-----------------------------------------------------------------------------
def clear_queue():
args = _parseArguments(sys.argv)
pkg = 'iqmon'
framework_config_file = "configs/framework.cfg"
framework_config_fullpath = pkg_resources.resource_filename(pkg, framework_config_file)
cfg = ConfigClass(framework_config_fullpath)
drpif = FrameworkInterface(cfg)
# Print pending Events
if drpif.is_queue_ok():
events = drpif.pending_events()
print(f'Found {len(events)} in queue')
else:
print ("Pending events: Queue not available", drpif.queue)
if drpif.is_queue_ok():
drpif.stop_event_queue()
print ("Queue manager stopped")
else:
print ("Queue manager already stopped")
if __name__ == "__main__":
analyze_one()
| 2.125 | 2 |
01_label_uncertainty/models.py | xiaozhanguva/Intrinsic_robustness_label_uncertainty | 3 | 12788262 | <gh_stars>1-10
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class Normalize(nn.Module):
def __init__(self, mu, std):
super(Normalize, self).__init__()
self.mu, self.std = mu, std
def forward(self, x):
return (x - self.mu) / self.std
class PreActBlock(nn.Module):
'''Pre-activation version of the BasicBlock.'''
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(PreActBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out += shortcut
return out
class PreActBottleneck(nn.Module):
'''Pre-activation version of the original Bottleneck module.'''
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(PreActBottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out = self.conv3(F.relu(self.bn3(out)))
out += shortcut
return out
class PreActResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(PreActResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.bn = nn.BatchNorm2d(512 * block.expansion)
self.linear = nn.Linear(512*block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.relu(self.bn(out))
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def PreActResNet18(num_classes=10):
return PreActResNet(PreActBlock, [2,2,2,2], num_classes=num_classes)
def PreActResNet34():
return PreActResNet(PreActBlock, [3,4,6,3])
def PreActResNet50():
return PreActResNet(PreActBottleneck, [3,4,6,3])
def PreActResNet101():
return PreActResNet(PreActBottleneck, [3,4,23,3])
def PreActResNet152():
return PreActResNet(PreActBottleneck, [3,8,36,3])
## wideresnet architecture
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(int(nb_layers)):
layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
assert((depth - 4) % 6 == 0)
n = (depth - 4) / 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
return self.fc(out)
def cifar_model_small():
model = nn.Sequential(
nn.Conv2d(3, 32, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(32, 64, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(64*8*8,512),
nn.ReLU(),
nn.Linear(512, 10)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
def cifar_model_large():
model = nn.Sequential(
nn.Conv2d(3, 32, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(32, 32, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(32, 64, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(64*8*8,512),
nn.ReLU(),
nn.Linear(512,512),
nn.ReLU(),
nn.Linear(512,10)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
def get_model(model_name):
if model_name == 'resnet18': ## RN-18
model = PreActResNet18()
elif model_name == 'resnet50': ## RN-50
model = PreActResNet50()
elif model_name == 'wideresnet': ## WRN-34-10
model = WideResNet(34, 10, widen_factor=10, dropRate=0.2)
elif model_name == 'small': ## CNN-4
model = cifar_model_small()
elif model_name == 'large': ## CNN-7
model = cifar_model_large()
elif model_name == "PreActResNet18":
model = PreActResNet18()
model = nn.DataParallel(model)
elif model_name == "WideResNet":
model = WideResNet(34, 10, widen_factor=10, dropRate=0.0)
model = nn.DataParallel(model)
else:
raise ValueError('wrong model')
return model
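# Smoke test (illustrative): build a few of the named variants above and push a dummy
# CIFAR-10-sized batch through them to check the output shape.
if __name__ == '__main__':
    x = torch.randn(2, 3, 32, 32)
    for name in ['resnet18', 'small', 'large']:
        net = get_model(name)
        out = net(x)
        print(name, tuple(out.shape))  # expected: (2, 10) for each variant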
| 2.421875 | 2 |
explainaboard/tasks/re/eval_spec.py | Shadowlized/ExplainaBoard | 255 | 12788263 | # -*- coding: utf-8 -*-
import explainaboard.error_analysis as ea
import numpy
import os
def get_aspect_value(sample_list, dict_aspect_func):
dict_span2aspect_val = {}
dict_span2aspect_val_pred = {}
for aspect, fun in dict_aspect_func.items():
dict_span2aspect_val[aspect] = {}
dict_span2aspect_val_pred[aspect] = {}
# maintain it for print error case
dict_sid2sent = {}
sample_id = 0
for info_list in sample_list:
#
#
#
# word_list = word_segment(sent).split(" ")
# Sentence Entities Paragraph True Relation Label Predicted Relation Label
# Sentence Length Paragraph Length Number of Entities in Ground Truth Relation Average Distance of Entities
sent, entities, paragraph, true_label, pred_label, sent_length, para_length, n_entity, avg_distance = info_list
dict_sid2sent[str(sample_id)] = ea.format4json2(entities + "|||" + sent)
sent_pos = ea.tuple2str((sample_id, true_label))
sent_pos_pred = ea.tuple2str((sample_id, pred_label))
# Sentence Length: sentALen
aspect = "sLen"
if aspect in dict_aspect_func.keys():
dict_span2aspect_val[aspect][sent_pos] = float(sent_length)
dict_span2aspect_val_pred[aspect][sent_pos_pred] = float(sent_length)
# Paragraph Length: pLen
aspect = "pLen"
if aspect in dict_aspect_func.keys():
dict_span2aspect_val[aspect][sent_pos] = float(para_length)
dict_span2aspect_val_pred[aspect][sent_pos_pred] = float(para_length)
# Number of Entity: nEnt
aspect = "nEnt"
if aspect in dict_aspect_func.keys():
dict_span2aspect_val[aspect][sent_pos] = float(n_entity)
dict_span2aspect_val_pred[aspect][sent_pos_pred] = float(n_entity)
# Average Distance: avgDist
aspect = "avgDist"
if aspect in dict_aspect_func.keys():
dict_span2aspect_val[aspect][sent_pos] = float(avg_distance)
dict_span2aspect_val_pred[aspect][sent_pos_pred] = float(avg_distance)
# Tag: tag
aspect = "tag" ############## MUST Be Gold Tag for text classification task
if aspect in dict_aspect_func.keys():
dict_span2aspect_val[aspect][sent_pos] = true_label
dict_span2aspect_val_pred[aspect][sent_pos_pred] = true_label
sample_id += 1
# print(dict_span2aspect_val["bleu"])
return dict_span2aspect_val, dict_span2aspect_val_pred, dict_sid2sent
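# For reference, each row of the system-output TSV consumed above is expected to carry
# nine tab-separated fields, in this order (field names paraphrase the unpacking code;
# no real data is shown):
#
#   sentence <TAB> entities <TAB> paragraph <TAB> true_label <TAB> pred_label
#   <TAB> sent_length <TAB> para_length <TAB> n_entity <TAB> avg_distance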
def evaluate(task_type="ner", analysis_type="single", systems=[], dataset_name = 'dataset_name', model_name = 'model_name', output_filename="./output.json", is_print_ci=False,
is_print_case=False, is_print_ece=False):
path_text = systems[0] if analysis_type == "single" else ""
path_comb_output = "model_name" + "/" + path_text.split("/")[-1]
dict_aspect_func, dict_precomputed_path, obj_json = ea.load_task_conf(task_dir=os.path.dirname(__file__))
sample_list, sent_list, entity_list, true_list, pred_list = file_to_list(path_text)
error_case_list = []
if is_print_case:
error_case_list = get_error_case(sent_list, entity_list, true_list, pred_list)
print(" -*-*-*- the number of error casse:\t", len(error_case_list))
dict_span2aspect_val, dict_span2aspect_val_pred, dict_sid2sent = get_aspect_value(sample_list, dict_aspect_func)
holistic_performance = ea.accuracy(true_list, pred_list)
holistic_performance = format(holistic_performance, '.3g')
# Confidence Interval of Holistic Performance
confidence_low, confidence_up = 0, 0
if is_print_ci:
confidence_low, confidence_up = ea.compute_confidence_interval_acc(true_list, pred_list, n_times=1000)
dict_span2aspect_val, dict_span2aspect_val_pred, dict_sid2sent = get_aspect_value(sample_list, dict_aspect_func)
print("------------------ Holistic Result----------------------")
print(holistic_performance)
# print(f1(list_true_tags_token, list_pred_tags_token)["f1"])
dict_bucket2span = {}
dict_bucket2span_pred = {}
dict_bucket2f1 = {}
aspect_names = []
for aspect, func in dict_aspect_func.items():
# print(aspect, dict_span2aspect_val[aspect])
dict_bucket2span[aspect] = ea.select_bucketing_func(func[0], func[1], dict_span2aspect_val[aspect])
# print(aspect, dict_bucket2span[aspect])
# exit()
dict_bucket2span_pred[aspect] = ea.bucket_attribute_specified_bucket_interval(dict_span2aspect_val_pred[aspect],
dict_bucket2span[aspect].keys())
# dict_bucket2span_pred[aspect] = __select_bucketing_func(func[0], func[1], dict_span2aspect_val_pred[aspect])
dict_bucket2f1[aspect] = get_bucket_acc_with_error_case(dict_bucket2span[aspect],
dict_bucket2span_pred[aspect], dict_sid2sent,
is_print_ci, is_print_case)
aspect_names.append(aspect)
print("aspect_names: ", aspect_names)
print("------------------ Breakdown Performance")
for aspect in dict_aspect_func.keys():
ea.print_dict(dict_bucket2f1[aspect], aspect)
print("")
# Calculate databias w.r.t numeric attributes
dict_aspect2bias = {}
for aspect, aspect2Val in dict_span2aspect_val.items():
if type(list(aspect2Val.values())[0]) != type("string"):
dict_aspect2bias[aspect] = numpy.average(list(aspect2Val.values()))
print("------------------ Dataset Bias")
for k, v in dict_aspect2bias.items():
print(k + ":\t" + str(v))
print("")
dict_fine_grained = {}
for aspect, metadata in dict_bucket2f1.items():
dict_fine_grained[aspect] = []
for bucket_name, v in metadata.items():
# print("---------debug--bucket name old---")
# print(bucket_name)
bucket_name = ea.beautify_interval(bucket_name)
# print("---------debug--bucket name new---")
# print(bucket_name)
# bucket_value = format(v[0]*100,'.4g')
bucket_value = format(v[0], '.4g')
n_sample = v[1]
confidence_low_bucket = format(v[2], '.4g')
confidence_up_bucket = format(v[3], '.4g')
bucket_error_case = v[4]
# instantiation
dict_fine_grained[aspect].append({"bucket_name": bucket_name, "bucket_value": bucket_value, "num": n_sample,
"confidence_low": confidence_low_bucket,
"confidence_up": confidence_up_bucket,
"bucket_error_case": bucket_error_case})
obj_json["task"] = task_type
obj_json["data"]["language"] = "English"
obj_json["data"]["name"] = dataset_name
obj_json["data"]["bias"] = dict_aspect2bias
obj_json["data"]["output"] = path_comb_output
obj_json["model"]["name"] = model_name
obj_json["model"]["results"]["overall"]["error_case"] = error_case_list
obj_json["model"]["results"]["overall"]["performance"] = holistic_performance
obj_json["model"]["results"]["overall"]["confidence_low"] = confidence_low
obj_json["model"]["results"]["overall"]["confidence_up"] = confidence_up
obj_json["model"]["results"]["fine_grained"] = dict_fine_grained
raise NotImplementedError('RE is not fully implemented yet, see below')
# ece = 0
# dic_calibration = None
# if is_print_ece:
# ece, dic_calibration = process_all(path_text,
# size_of_bin=10, dataset=corpus_type, model=model_name)
# obj_json["model"]["results"]["calibration"] = dic_calibration
# # print(dic_calibration)
# ea.save_json(obj_json, output_filename)
#
# def main():
#
# parser = argparse.ArgumentParser(description='Interpretable Evaluation for NLP')
#
#
# parser.add_argument('--task', type=str, required=True,
# help="absa")
#
# parser.add_argument('--ci', type=str, required=False, default= False,
# help="True|False")
#
# parser.add_argument('--case', type=str, required=False, default= False,
# help="True|False")
#
# parser.add_argument('--ece', type=str, required=False, default= False,
# help="True|False")
#
#
# parser.add_argument('--type', type=str, required=False, default="single",
# help="analysis type: single|pair|combine")
# parser.add_argument('--systems', type=str, required=True,
# help="the directories of system outputs. Multiple one should be separated by comma, for example, system1,system2 (no space)")
#
# parser.add_argument('--output', type=str, required=True,
# help="analysis output file")
# args = parser.parse_args()
#
#
# is_print_ci = args.ci
# is_print_case = args.case
# is_print_ece = args.ece
#
# task = args.task
# analysis_type = args.type
# systems = args.systems.split(",")
# output = args.output
#
#
# print("task", task)
# print("type", analysis_type)
# print("systems", systems)
# # sample_list = file_to_list_re(systems[0])
# # print(sample_list[0])
# evaluate(task_type=task, analysis_type=analysis_type, systems=systems, output=output, is_print_ci = is_print_ci, is_print_case = is_print_case, is_print_ece = is_print_ece)
#
# # python eval_spec.py --task re --systems ./test_re.tsv --output ./a.json
# if __name__ == '__main__':
# main()
def get_bucket_acc_with_error_case(dict_bucket2span, dict_bucket2span_pred, dict_sid2sent, is_print_ci, is_print_case):
# The structure of span_true or span_pred
# 2345|||Positive
# 2345 represents sentence id
# Positive represents the "label" of this instance
dict_bucket2f1 = {}
for bucket_interval, spans_true in dict_bucket2span.items():
spans_pred = []
if bucket_interval not in dict_bucket2span_pred.keys():
raise ValueError("Predict Label Bucketing Errors")
else:
spans_pred = dict_bucket2span_pred[bucket_interval]
# loop over samples from a given bucket
error_case_bucket_list = []
if is_print_case:
for info_true, info_pred in zip(spans_true, spans_pred):
sid_true, label_true = info_true.split("|||")
sid_pred, label_pred = info_pred.split("|||")
if sid_true != sid_pred:
continue
sent_entities = dict_sid2sent[sid_true]
if label_true != label_pred:
error_case_info = label_true + "|||" + label_pred + "|||" + sent_entities
error_case_bucket_list.append(error_case_info)
accuracy_each_bucket = ea.accuracy(spans_pred, spans_true)
confidence_low, confidence_up = 0, 0
if is_print_ci:
confidence_low, confidence_up = ea.compute_confidence_interval_acc(spans_pred, spans_true)
dict_bucket2f1[bucket_interval] = [accuracy_each_bucket, len(spans_true), confidence_low, confidence_up,
error_case_bucket_list]
return ea.sort_dict(dict_bucket2f1)
def get_error_case(sent_list, entity_list, true_label_list, pred_label_list):
error_case_list = []
for sent, entities, true_label, pred_label in zip(sent_list, entity_list, true_label_list, pred_label_list):
if true_label != pred_label:
error_case_list.append(true_label + "|||" + pred_label + "|||" + entities + "|||" + ea.format4json2(sent))
return error_case_list
def file_to_list(file_path):
sample_list = []
fin = open(file_path, "r")
true_list = []
pred_list = []
sent_list = []
entity_list = []
for idx, line in enumerate(fin):
if idx == 0:
continue
info_list = line.rstrip("\n").split("\t")
sample_list.append([info for info in info_list])
true_list.append(info_list[3])
pred_list.append(info_list[4])
sent_list.append(info_list[0])
entity_list.append(info_list[1])
return sample_list, sent_list, entity_list, true_list, pred_list | 2.65625 | 3 |
tests/test_query_runner.py | o19s/solr-grid-tuning | 0 | 12788264 | <filename>tests/test_query_runner.py<gh_stars>0
from solr_grid_tuning.solr_client import SolrClient
from solr_grid_tuning.solr_query import SolrQuery
from solr_grid_tuning.query_runner import QueryRunner
# This is the configuration of the local Solr instance that is assumed by this test
base_url = "http://localhost:8983/solr"
collection = "tmdb"
request_handler = "select"
def test_basic_query():
solr_client = SolrClient(base_url, auth=None, collection=collection, request_handler=request_handler)
solr_query = SolrQuery(q="star wars", fl=["id"], other_params=[("qf", "title")])
query_runner = QueryRunner()
results = query_runner.run_query(solr_client, solr_query)
assert len(results) == 10
| 2.34375 | 2 |
old_backup/seattle.py | GabbyHE/Bigscity-LibCity-Datasets | 32 | 12788265 | <filename>old_backup/seattle.py
# link: https://www.microsoft.com/en-us/research/publication/hidden-markov-map-matching-noise-sparseness/
import re
import os
import json
from util import ensure_dir
network = trackInfo = truth = geo = rel = dyna = usr = route = None
def processGeoAndRelAndRoute():
geo.write("geo_id, type, coordinate\n")
rel.write("rel_id,type,origin_id,destination_id\n")
network.readline()
line = network.readline()
nodeInfo = re.search("\\(.+\\)", line)
j = 0
currentSum = 0
dic = {}
while nodeInfo is not None:
flag = line.split('\t')[3]
nodes = nodeInfo[0].replace('(', '').replace(')', '').split(', ')
i = 0
while i < len(nodes):
node1 = nodes[i].split(" ")[0]
node2 = nodes[i].split(" ")[1]
geo.write(str(j) + ',Point,"[' + node1 + ',' + node2 + ']"\n')
if i != len(nodes) - 1:
rel.write(str(currentSum) + ',geo,' + str(j) + ',' + str(j + 1) + '\n')
if line.split('\t')[0] in dic.keys():
dic[line.split('\t')[0]].append(currentSum)
else:
dic[line.split('\t')[0]] = [currentSum]
currentSum += 1
if flag == '1':
rel.write(str(currentSum) + ',geo,' + str(j + 1) + ',' + str(j) + '\n')
currentSum += 1
i += 1
j += 1
line = network.readline()
nodeInfo = re.search("\\(.+\\)", line)
nodeNum = j
trackInfo.readline()
nodeInfo = re.split('\t| ', trackInfo.readline())
while len(nodeInfo) == 6:
node1 = nodeInfo[3]
node2 = nodeInfo[2]
geo.write(str(j) + ',Point,"[' + node1 + ',' + node2 + ']"\n')
j += 1
nodeInfo = re.split('\t| ', trackInfo.readline())
route.write("route_id,usr_id,rel_id\n")
truth.readline()
truth_info = truth.readline()
route_id = 0
while truth_info != '':
edge_id = truth_info.split("\t")[0]
traversed = truth_info.split("\t")[1].replace('\n', '')
if traversed == '1':
i = 0
while i < len(dic[edge_id]):
route.write(str(route_id) + ',0,' + str(dic[edge_id][i]) + '\n')
route_id += 1
i += 1
else:
i = len(dic[edge_id]) - 1
while i >= 0:
route.write(str(route_id) + ',0,' + str(dic[edge_id][i]) + '\n')
route_id += 1
i -= 1
truth_info = truth.readline()
return nodeNum
def processUsr():
usr.write("usr_id\n")
usr.write("0")
def processDyna(nodeNum):
dyna.write("dyna_id,type,time,entity_id,location\n")
trackInfo.seek(0)
trackInfo.readline()
nodeInfo = re.split('\t| ', trackInfo.readline())
i = 0
while len(nodeInfo) == 6:
second = nodeInfo[1]
time = "2009-01-17T" + second + 'Z'
dyna.write(str(i) + ',trajectory,' + time + ',0,' + str(i + nodeNum) + '\n')
i += 1
nodeInfo = re.split('\t| ', trackInfo.readline())
def processConfig():
config = dict()
config['geo'] = dict()
config['geo']['including_types'] = ['Point']
config['geo']['Point'] = {}
config['usr'] = dict()
config['usr']['properties'] = {}
config['rel'] = dict()
config['rel']['including_types'] = ['geo']
config['rel']['geo'] = {'speed': 'num'}
config['dyna'] = dict()
config['dyna']['including_types'] = ['trajectory']
config['dyna']['trajectory'] = {'entity_id': 'usr_id', 'location': 'geo_id'}
config['info'] = dict()
    json.dump(config, open('./output/Seattle/config.json', 'w', encoding='utf-8'),
ensure_ascii=False, indent=4)
def openFile():
global network, trackInfo, truth, geo, rel, dyna, usr, route
input_path = '../input/Seattle'
network = open(os.path.join(input_path, "road_network.txt"), "r")
trackInfo = open(os.path.join(input_path, "gps_data.txt"), "r")
truth = open(os.path.join(input_path, "ground_truth_route.txt"), "r")
outputPath = './output/Seattle'
ensure_dir(outputPath)
geo = open(os.path.join(outputPath, "Seattle.geo"), "w")
rel = open(os.path.join(outputPath, "Seattle.rel"), "w")
dyna = open(os.path.join(outputPath, "Seattle.dyna"), "w")
usr = open(os.path.join(outputPath, "Seattle.usr"), "w")
route = open(os.path.join(outputPath, "Seattle.route"), "w")
def closeFile():
global network, trackInfo, truth, geo, rel, dyna, usr, route
network.close()
trackInfo.close()
truth.close()
geo.close()
rel.close()
dyna.close()
usr.close()
route.close()
def dataTransform():
openFile()
nodeNum = processGeoAndRelAndRoute()
processUsr()
processDyna(nodeNum)
processConfig()
closeFile()
if __name__ == '__main__':
dataTransform()
| 2.265625 | 2 |
ThreadFixProApi/Applications/_utils/_cicd.py | denimgroup/threadfix-python-api | 1 | 12788266 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__copyright__ = "(C) 2019 Denim group"
__contributors__ = ["<NAME>"]
__status__ = "Production"
__license__ = "MIT"
from ...API import API
class CICDAPI(API):
def __init__(self, host, api_key, verify_ssl, timeout, headers, user_agent, cert, debug):
"""
Initialize a ThreadFix Pro CI/CD API instance.
:param host: The URL for the ThreadFix Pro server. (e.g., http://localhost:8080/threadfix/) NOTE: must include http:// TODO: make it so that it is required or implicitly added if forgotten
:param api_key: The API key generated on the ThreadFix Pro API Key page.
:param verify_ssl: Specify if API requests will verify the host's SSL certificate, defaults to true.
:param timeout: HTTP timeout in seconds, default is 30.
:param user_agent: HTTP user agent string, default is "threadfix_pro_api/[version]".
:param cert: You can also specify a local cert to use as client side certificate, as a single file (containing
            the private key and the certificate) or as a tuple of both files' paths
:param debug: Prints requests and responses, useful for debugging.
"""
super().__init__(host, api_key, verify_ssl, timeout, headers, user_agent, cert, debug)
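    # Illustrative usage sketch (not part of the original file; host and key below are placeholders):
    #
    #   api = CICDAPI("http://localhost:8080/threadfix/", "<api-key>",
    #                 verify_ssl=True, timeout=30, headers=None,
    #                 user_agent=None, cert=None, debug=False)
    #   status = api.evaluate_cicd_pass_criteria(application_id=42, from_date="2019-01-01")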
def evaluate_cicd_pass_criteria(self, application_id, from_date=None, to_date=None):
"""
Checks the specified application against all of the CI/CD pass criteria attached to it
:param application_id: Application identifier
:param from_date: Evaluate against any new open vulnerabilities from this date. If no date is specified, the start date will be December 31, 1969.
The time will be the start of day, 00:00:00. Format as yyyy-MM-dd
:param to_date: Evaluate against any new open vulnerabilities until this date. If no start date is specified, the end date will be the current date.
The time will be the end of day, 23:59:59. Format as yyyy-MM-dd
"""
params = {}
if from_date:
params['fromDate'] = from_date
if to_date:
params['toDate'] = to_date
return super().request('GET', '/policy/status/application/' + str(application_id) + '/evaluate', params)
def create_cicd_defect_reporter(self, severity, minimum=None, group_by=None):
"""
Creates a new CI/CD defect reporter
:param severity: Name of severity
:param minimum: If true, includes all severities greater than the specified one as well. Default value is false.
:param group_by: How to group vulnerabilities for defects
"""
params = {'severity' : severity}
if minimum:
params['minimum'] = minimum
if group_by:
params['groupBy'] = group_by
return super().request('POST', '/cicd/defectReporting/create', params)
def update_cicd_defect_reporter(self, cicd_id, severity, minimum=None, group_by=None):
"""
Creates a new CI/CD defect reporter
:param cicd_id: CI/CD identifier
:param severity: Name of severity
:param minimum: If true, includes all severities greater than the specified one as well. Default value is false.
:param group_by: How to group vulnerabilities for defects
"""
params = {'severity' : severity}
if minimum:
params['minimum'] = minimum
if group_by:
params['groupBy'] = group_by
return super().request('PUT', '/cicd/defectReporting/' + str(cicd_id) + '/update', params)
def list_cicd_defect_reporters(self):
"""
Lists CI/CD defect reporters
"""
return super().request('GET', '/cicd/defectReporting')
def get_cicd_defect_reporter_details(self, cicd_id):
"""
Returns CI/CD defect reporter details
:param cicd_id: CI/CD identifier
"""
return super().request('GET', '/cicd/defectReporting/' + str(cicd_id) + '/detail')
def delete_cicd_defect_reporter(self, cicd_id):
"""
Deletes the CI/CD defect reporter
:param cicd_id: CI/CD identifier
"""
return super().request('DELETE', '/cicd/defectReporting/' + str(cicd_id) + '/delete')
def add_application_defect_tracker_to_cicd_defect_reporter(self, defect_reporter_id, app_defect_tracker_id):
"""
Attaches the specified Application Defect Tracker to the specified CI/CD Defect Reporter
:param defect_reporter_id: Defect Reporter identifier
:param app_defect_tracker_id: App Defect Tracker identifier
"""
return super().request('PUT', '/cicd/defectReporting/' + str(defect_reporter_id) + '/addApplicationDefectTracker/' + str(app_defect_tracker_id))
def remove_application_defect_tracker_to_cicd_defect_reporter(self, defect_reporter_id, app_defect_tracker_id):
"""
Attaches the specified Application Defect Tracker to the specified CI/CD Defect Reporter
:param defect_reporter_id: Defect Reporter identifier
:param app_defect_tracker_id: App Defect Tracker identifier
"""
return super().request('PUT', '/cicd/defectReporting/' + str(defect_reporter_id) + '/removeApplicationDefectTracker/' + str(app_defect_tracker_id))
def create_cicd_pass_criteria_group(self, name, severity, max_allowed=None, max_introduced=None, not_allowed=None, not_introduced=None):
"""
Creates a new CI/CD Pass Criteria Group.
:param name: Name of pass criteria
:param severity: Name of severity.
:param max_allowed: The maximum number of vulnerabilities allowed for the Pass Criteria. If no value is specified, there is no limit.
:param max_introduced: The maximum number of new vulnerabilities in a scan for the Pass Criteria. If no value is specified, there is no limit.
:param not_allowed: If no vulnerabilities allowed for the Pass Criteria (analogous to setting maxAllowed=0)
:param not_introduced: If no new vulnerabilities allowed in a scan for the Pass Criteria (analogous to setting maxIntroduced=0)
"""
params = {'name' : name, 'severity' : severity}
if not max_allowed is None:
params['maxAllowed'] = max_allowed
if not max_introduced is None:
params['maxIntroduced'] = max_introduced
if not not_allowed is None:
params['notAllowed'] = not_allowed
if not not_introduced is None:
params['notIntroduced'] = not_introduced
return super().request('POST', '/cicd/passCriteriaGroup/create', params)
def list_cicd_pass_criteria_group(self):
"""
Lists CI/CD Pass Criteria Group.
"""
return super().request('GET', '/cicd/passCriteriaGroup')
def get_cicd_pass_criteria_group(self, cicd_group_id):
"""
Returns detailed information about the specified CI/CD Pass Criteria Group.
:param cicd_group_id: The CI/CD Group to get information about
"""
return super().request('GET', '/cicd/passCriteriaGroup/' + str(cicd_group_id) + '/detail')
def add_cicd_pass_criterion_to_pass_criteria_group(self, cicd_group_id, severity, max_allowed=None, max_introduced=None, not_allowed=None, not_introduced=None):
"""
Creates a new CI/CD Pass Criteria Group.
:param cicd_group_id: The CI/CD Group to update
:param severity: Name of severity.
:param max_allowed: The maximum number of vulnerabilities allowed for the Pass Criteria. If no value is specified, there is no limit.
:param max_introduced: The maximum number of new vulnerabilities in a scan for the Pass Criteria. If no value is specified, there is no limit.
:param not_allowed: If no vulnerabilities allowed for the Pass Criteria (analogous to setting maxAllowed=0)
:param not_introduced: If no new vulnerabilities allowed in a scan for the Pass Criteria (analogous to setting maxIntroduced=0)
"""
params = {'severity' : severity}
if not max_allowed is None:
params['maxAllowed'] = max_allowed
if not max_introduced is None:
params['maxIntroduced'] = max_introduced
if not not_allowed is None:
params['notAllowed'] = not_allowed
if not not_introduced is None:
params['notIntroduced'] = not_introduced
return super().request('POST', '/cicd/passCriteriaGroup/' + str(cicd_group_id) + '/addCriterion', params)
def remove_cicd_pass_criterion_from_pass_criteria_group(self, cicd_group_id, cicd_criterion_id):
"""
Removes and deletes the Pass Criterion object attached to the Pass Criteria Group.
:param cicd_group_id: The CI/CD Group to update
:param cicd_criterion_id: The CI/CD criterion to remove
"""
return super().request('DELETE', '/cicd/passCriteriaGroup/' + str(cicd_group_id) + '/removeCriterion/' + str(cicd_criterion_id))
def add_application_to_cicd_pass_criteria_group(self, cicd_group_id, application_id):
"""
Attaches the specified Application to the specified CI/CD Pass Criteria Group.
:param cicd_group_id: CI/CD Group to attach the application to
:param application_id: ID of application to attach to the group
"""
return super().request('POST', '/cicd/passCriteriaGroup/' + str(cicd_group_id) + '/addApplication/' + str(application_id))
def remove_application_from_cicd_pass_criteria_group(self, cicd_group_id, application_id):
"""
Removes the specified Application from the specified CI/CD Pass Criteria Group.
:param cicd_group_id: CI/CD Group to remove the application from
:param application_id: ID of application to remove from the group
"""
return super().request('DELETE', '/cicd/passCriteriaGroup/' + str(cicd_group_id) + '/removeApplication/' + str(application_id))
def update_cicd_pass_criteria_group(self, cicd_group_id, severity, max_allowed=None, max_introduced=None):
"""
Creates a new CI/CD Pass Criteria Group.
:param cicd_group_id: The CI/CD Group to update
:param severity: Name of severity.
:param max_allowed: The maximum number of vulnerabilities allowed for the Pass Criteria. If no value is specified, there is no limit.
:param max_introduced: The maximum number of new vulnerabilities in a scan for the Pass Criteria. If no value is specified, there is no limit.
"""
params = {'severity' : severity}
if not max_allowed is None:
params['maxAllowed'] = max_allowed
if not max_introduced is None:
params['maxIntroduced'] = max_introduced
return super().request('POST', '/cicd/passCriteriaGroup/' + str(cicd_group_id) + '/update', params)
def delete_cicd_pass_criteria_group(self, cicd_group_id):
"""
Deletes the specified CI/CD Pass Criteria Group.
:param cicd_group_id: The CI/CD Group to delete
"""
return super().request('DELETE', '/cicd/passCriteriaGroup/' + str(cicd_group_id) + '/delete') | 2.3125 | 2 |
accounts/urls.py | 7h3qu1rkyb1t/Xarena | 0 | 12788267 | <gh_stars>0
from . import views
from django.urls import path
from django.contrib.auth import views as auth_views
urlpatterns = [
path("", views.target),
path("register/", views.register, name="register"),
path("login/", auth_views.LoginView.as_view(template_name = "accounts/login.html"), name="login"),
path("logout/", auth_views.LogoutView.as_view(template_name = "accounts/logout.html"), name="logout"),
path("password-reset/", auth_views.PasswordResetView.as_view(template_name="accounts/password_reset.html"), name="password_reset"),
path("password-reset/done", auth_views.PasswordResetDoneView.as_view(template_name="accounts/password_reset_done.html"), name="password_reset_done"),
path("password-reset-confirm/<uidb64>/<token>/", auth_views.PasswordResetConfirmView.as_view(template_name="accounts/password_reset_confirm.html"), name="password_reset_confirm"),
path("password-reset-complete/", auth_views.PasswordResetCompleteView.as_view(template_name="accounts/password_reset_complete.html"), name="password_reset_complete"),
path("activate/<uidb64>/<token>/", views.activate, name="activate"),
path("profile/", views.profile, name="profile"),
path("profile/payment_update/<int:pk>", views.payment_update, name="payment_update"),
path("Money-requests/", views.MoneyRequests.as_view(), name="money_req"),
path("Money-requests/handle", views.money_req_handle, name="money_req_handle"),
path("profile/image-upload", views.image_upload, name="image_upload"),
path("profile/update-info/<int:pk>/", views.UpdateSubscription.as_view(), name="update_subscription"),
path("profile/update-info/<int:pk>/delete", views.DeleteSubscription.as_view(), name="delete_subscription"),
path("profile/transfer", views.money_transfer, name= "transfer"),
path("profile/transfer/to_wallet", views.to_wallet, name= "to_wallet"),
path("profile/trans-status", views.trans_status, name="trans_status")
]
| 1.9375 | 2 |
job_configs.py | rajeswar18/url_benchmark | 0 | 12788268 | <filename>job_configs.py
import os
if os.environ["EAI_ACCOUNT_ID"] == "<KEY>":
user = "pau"
elif os.environ["EAI_ACCOUNT_ID"] == "68fdc833-1eaa-4b60-9b64-ac27814e4b61":
user = "sai"
else:
user = "issam"
JOB_CONFIG = {
"account_id": os.environ["EAI_ACCOUNT_ID"],
"image": "registry.console.elementai.com/snow.colab/ssh",
"data": ["snow.colab.public:/mnt/public", f"snow.{user}.home:/mnt/home"],
"restartable": True,
"resources": {
"cpu": 8,
"mem": 32,
"gpu": 1,
# 'gpu_mem': 20,
"gpu_model": "!A100",
},
"interactive": False,
"bid": 0,
}
| 1.75 | 2 |
analysis/HtmlAnalysis.py | matthewcornell/skeptical-toolbox | 0 | 12788269 | <reponame>matthewcornell/skeptical-toolbox<gh_stars>0
import urllib.request
import urllib.parse
from bs4 import BeautifulSoup as bs
class HtmlAnalysis(object):
"""
Defines routines to analyze a URL and extract various resources to support a skeptical analysis of it.
"""
def __init__(self, url, html=None):
"""
:param url: string
:param html: html content (string) for testing, or None, which means connect to the URL for content
:return:
"""
self.url = url
# set the soup
if not html:
# pass user_agent because some sites require spoofing
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'
req = urllib.request.Request(url, headers={'User-Agent': user_agent})
httpResponse = urllib.request.urlopen(req)
self.soup = bs(httpResponse.read(), "lxml") # todo encoding?
else:
self.soup = bs(html, "lxml")
self.title = self.soup.title.string
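    # Illustrative usage sketch (URL is a placeholder; fetching it needs network access unless html= is supplied):
    #
    #   analysis = HtmlAnalysis('https://example.com/article')
    #   offsite = analysis.outgoing_links(only_offsite=True)
    #   ratio = analysis.outgoing_link_to_para_ratio()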
def is_offsite_link(self, a):
"""
:param a: the tag to examine
        :return: True if a's href links to a domain other than the one in self.url, False otherwise
        """
        site_netloc = urllib.parse.urlparse(self.url).netloc
        link_netloc = urllib.parse.urlparse(a.get('href') or '').netloc
        return bool(link_netloc) and (link_netloc != site_netloc)  # NB: link_netloc is empty for missing or relative hrefs, e.g., <a href="/">Home</a>
def outgoing_links(self, only_offsite):
"""
:param only_offsite: controls whether only offsite links are included (if True) or all links (False)
:return: list of <a> tags, excluding ones that have no text, and optionally excluding onsite ones if only_offsite
"""
all_links = [a for a in self.soup.find_all('a') if a.string and a.string.strip()]
return [a for a in all_links if self.is_offsite_link(a)] if only_offsite else all_links
def outgoing_link_to_para_ratio(self):
return len(self.outgoing_links(True)) / len(self.soup.find_all('p'))
| 3.09375 | 3 |
libs/chromosome/mutation_service.py | Hunteerq/GeneticAlgorithms | 0 | 12788270 | import numpy as np
from libs.chromosome.chromosome_modifier import ChromosomeModifier
from libs.chromosome.mutation_types import MutationTypes
class MutationService:
def __init__(self, algorithm_configuration):
self.__algorithm_configuration = algorithm_configuration
self.__chromosome_modifier = ChromosomeModifier(algorithm_configuration.chromosome_config,
algorithm_configuration.left_range_number,
algorithm_configuration.right_range_number)
def handle_mut(self, pop_to_mut):
return [self.__apply_mut(chromosome) for chromosome in pop_to_mut]
def __apply_mut(self, chromosome):
mut_type = self.__algorithm_configuration.chromosome_config.mut_type
if mut_type == MutationTypes.INDICES_SWAP.name:
return self.__chromosome_modifier.mutation_indices_swap(chromosome)
if mut_type == MutationTypes.STEADY.name:
return self.__chromosome_modifier.mutation_steady(chromosome)
| 2.671875 | 3 |
cakebot/bot.py | jeffseif/cakebot | 0 | 12788271 | <gh_stars>0
import inspect
import irc.bot
import irc.connection
import re
import ssl
import cakebot.bind
import cakebot.config
import cakebot.logging
import cakebot.mods
from cakebot import __version__
from cakebot import KILL_SWITCH
class Bot(irc.bot.SingleServerIRCBot):
nick_to_kill = None
def reset_attrs(self):
self.forwards = set()
self.listens = set()
self.patterns = list()
@classmethod
def from_dict(cls, the_dict):
config = {
attr: the_dict.pop(attr)
for attr in ('forwards', 'listens', 'patterns')
}
instance = cls(**the_dict)
instance.config = config
instance.reset_attrs()
return instance
def get_version(self):
return 'AIRC CakeBot Version {version}'.format(version=__version__)
def on_nicknameinuse(self, conn, event):
self.nick_to_kill = conn.get_nickname()
new = '_'.join((
self.nick_to_kill,
'killah',
))
cakebot.logging.warning('[{new}] {old} already in use; trying to kill it with {kill} as {new} ...'.format(old=self.nick_to_kill,new=new,kill=KILL_SWITCH))
conn.nick(new)
def on_welcome(self, conn, event):
if self.nick_to_kill:
event.target = self.nick_to_kill
self.send(conn, event, KILL_SWITCH, override_target=True)
self.die()
nickname = conn.get_nickname()
cakebot.logging.info('[{nickname}] successfully connected to AIRC'.format(nickname=nickname))
for channel in self.config['forwards']:
cakebot.logging.info('[{nickname}] forwarding to channel {channel}'.format(nickname=nickname, channel=channel))
conn.join(channel)
self.forwards.add(channel)
for channel in self.config['listens']:
cakebot.logging.info('[{nickname}] listening to channel {channel}'.format(nickname=nickname, channel=channel))
conn.join(channel)
self.listens.add(channel)
for pattern in self.config['patterns']:
self.patterns.append(cakebot.bind.bind_inner('hear', pattern, cakebot.mods.forward))
@staticmethod
def get_is_to_me(nickname, message):
return message.lower().startswith(nickname.lower())
@staticmethod
def strip_nick_from_message(nickname, message):
index = len(nickname)
if message[index] in (':', ','):
index += 1
return message[index:].strip()
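    # e.g. strip_nick_from_message('cakebot', 'cakebot: hello') -> 'hello'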
def action(self, conn, event, message):
self.send(conn, event, '\001ACTION {message}\001'.format(message=message))
def send(self, conn, event, message, override_target=False):
if (not override_target) and (event.target.lower() == conn.get_nickname().lower()):
event.target = event.source.nick
conn.privmsg(event.target, message)
cakebot.logging.info('> ({target}): {message}'.format(target=event.target, message=message))
def on_privmsg(self, conn, event):
self.respond(conn, event, is_private=True)
def on_pubmsg(self, conn, event):
self.respond(conn, event)
def respond(self, conn, event, is_private=False):
nickname = conn.get_nickname()
message = event.arguments[0].strip()
if is_private or self.get_is_to_me(nickname, message):
if not is_private:
message = self.strip_nick_from_message(nickname, message)
self.try_reply_or_hear(conn, event, message, cakebot.bind.BINDS['reply'])
self.try_reply_or_hear(conn, event, message, cakebot.bind.BINDS['hear'])
self.try_reply_or_hear(conn, event, message, self.patterns)
def try_reply_or_hear(self, conn, event, message, binds):
for bind_type, name, pattern, match, func in binds:
match = match.match(message)
if match:
cakebot.logging.info('[{nickname}] {bind_type}: {name} (`{pattern}`)'.format(
nickname=conn.get_nickname(),
bind_type=bind_type.upper(),
name=name,
pattern=pattern,
))
func(self, conn, event, message, match)
| 2.09375 | 2 |
tests/conftest.py | federicober/cookiecutter-python-lib | 1 | 12788272 | import contextlib
import os
import pathlib
import shutil
import subprocess
import sys
import pytest
from cookiecutter.main import cookiecutter
_template_dir = pathlib.Path(__file__).parent.parent
_base_cookiecutter_args = {
"project_name": "my-python-package",
"package_name": "my_python_package",
"friendly_name": "My Python Package",
"author": "<NAME>",
"email": "<EMAIL>",
"github_user": "federicober",
"version": "0.1.0",
"dockerized": "false",
"docs_backend": "sphinx",
}
@contextlib.contextmanager
def change_dir(dir_name):
cwd = os.getcwd()
try:
os.chdir(dir_name)
yield
finally:
os.chdir(cwd)
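# Usage sketch: `with change_dir(some_path): ...` runs the body in that directory and
# restores the original working directory afterwards, even if the body raises.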
@pytest.fixture(scope="session")
def default_generated_project(tmpdir_factory):
base_temp_dir = tmpdir_factory.mktemp("default_generated_project")
subprocess.check_call(
[
sys.executable,
"-m",
"cookiecutter",
"--no-input",
"--output-dir",
str(base_temp_dir),
str(_template_dir),
],
stderr=subprocess.STDOUT,
)
project_dir = base_temp_dir / "my-python-package"
with change_dir(project_dir):
yield project_dir
@pytest.fixture()
def tmp_generated_project(default_generated_project, tmp_path):
shutil.copytree(default_generated_project, tmp_path, dirs_exist_ok=True)
with change_dir(tmp_path):
yield tmp_path
@pytest.fixture()
def custom_generated_project(tmp_path, request):
cookiecutter_args = _base_cookiecutter_args.copy()
if hasattr(request, "param"):
cookiecutter_args.update(request.param)
cookiecutter(
str(_template_dir),
output_dir=str(tmp_path),
no_input=True,
extra_context=cookiecutter_args,
)
project_dir = tmp_path / cookiecutter_args["project_name"]
with change_dir(project_dir):
yield project_dir
| 2.0625 | 2 |
dihedrals.py | meyresearch/ANI-Peptides | 1 | 12788273 | <reponame>meyresearch/ANI-Peptides
# Based on: http://archive.ambermd.org/201304/0256.html
# https://github.com/fylinhub/2D-free-energy-plots-and-similarity-calculation/blob/master/plot_freeenergy_chi1chi2.ipynb
# import MDAnalysis as mda
# from MDAnalysis.analysis.dihedrals import Ramachandran
import mdtraj as md
import matplotlib.pyplot as plt
import time
import argparse
import numpy as np
import os.path
import datetime
TRAJECTORY_FN = "trajectory_reimaged.dcd"
TOPOLOGY_FN = "topology.pdb"
parser = argparse.ArgumentParser()
parser.add_argument("prod_dir", help="Production directory to perform analysis on")
args = parser.parse_args()
if not os.path.isdir(args.prod_dir):
print(f"Production directory to resume is not a directory: {args.prod_dir}")
quit()
# Check all required files exist in prod directory to resume
files_available = os.listdir(args.prod_dir)
files_required = (
TOPOLOGY_FN,
TRAJECTORY_FN,
)
if not all(filename in files_available for filename in files_required):
print(f"Production directory to analyse must contain files with the following names: {files_required}")
quit()
output_dir = os.path.join(args.prod_dir, f"dihedral_analysis_{datetime.datetime.now().strftime('%H%M%S_%d%m%y')}")
os.mkdir(output_dir)
def free_energy(phi, psi):
# Plot free energy
x = phi
y = psi
degrees_fmt = lambda x, _: f"{x}°"
ticks = np.arange(-180, 181, 60)
temperature = 300
    heatmap, xedges, yedges = np.histogram2d(x, y, bins=90, density=False, range=np.array([[-180, 180], [-180,180]]))
heatmap = heatmap.T
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
    # delta G = -RT*ln(P); R is taken in cal/(mol K) (8.314/4.184) and the factor of -0.001 converts to kcal/mol
with np.errstate(divide='ignore'):
heatmap = np.log(heatmap/heatmap.sum()) * (8.314/4.184) * temperature * -0.001
heatmap[np.isinf(heatmap)] = np.nan
plt.imshow(heatmap, extent=extent, origin='lower', interpolation=None, cmap='gist_earth')
ax = plt.gca()
ax.xaxis.set_major_formatter(plt.FuncFormatter(degrees_fmt))
ax.yaxis.set_major_formatter(plt.FuncFormatter(degrees_fmt))
plt.xticks(ticks)
plt.yticks(ticks)
plt.grid(color="black", alpha=0.2, linewidth=0.3, linestyle="--")
plt.colorbar(label=r'$\Delta\mathit{G}$ (kcal mol$^{-1}$)')
plt.xlabel(r'$\phi$')
plt.ylabel(r'$\psi$')
def timetrace(phi, psi):
# Plot Phi and Psi
angles = (phi, psi)
fig, axs = plt.subplots(2,1,sharex=True, dpi=500)
ticks = np.arange(-180, 181, 90)
degrees_fmt = lambda y, _: f"{y}°"
if len(psi) < 1e6:
time_fmt = lambda x, _: f"{x/1e3} ns"
else:
time_fmt = lambda x, _: f"{x/1e6} µs"
x = np.arange(len(psi))
for j, name in enumerate((r'$\phi$', r'$\psi$')):
ax = axs[j]
ax.scatter(x, angles[j], marker=".", s=0.1)
ax.grid(color="black", alpha=0.2, linewidth=0.3, linestyle="--")
ax.set_yticks(ticks)
ax.set(ylabel=name, ylim=(-180, 180))
ax.yaxis.set_major_formatter(plt.FuncFormatter(degrees_fmt))
ax.xaxis.set_major_formatter(plt.FuncFormatter(time_fmt))
    plt.xlabel('time')
plotters = {
"Free Energy Surface": free_energy,
"Timetrace": timetrace
}
TRAJ = os.path.join(args.prod_dir, TRAJECTORY_FN)
TOP = os.path.join(args.prod_dir, TOPOLOGY_FN)
with md.formats.DCDTrajectoryFile(TRAJ, mode="r") as dcd:
total_frames = len(dcd)
print(f"{total_frames} frames total")
top = md.load(TOP).topology
# no time to fix indexing so we're doing it like this
phis = []
psis = []
print(f"Starting...")
time_start = time.time()
chunk_size = 50000
traj = md.iterload(TRAJ, top=TOP, chunk=chunk_size)
for i, chunk in enumerate(traj):
frames_remaining = total_frames - (i * chunk_size)
_, chunk_phis = md.compute_phi(chunk)
_, chunk_psis = md.compute_psi(chunk)
chunk_size = len(chunk_phis)
phis.append(np.rad2deg(chunk_phis))
psis.append(np.rad2deg(chunk_psis))
speed = chunk_size // (time.time() - time_start)
time_start = time.time()
print(f"{i*100*chunk_size/total_frames:.1f}%, {speed:.1f} frames per sec, {frames_remaining} frames remaining ", end="\r")
print("\nDihedral analysis complete")
phis = np.vstack(phis)
psis = np.vstack(psis)
residues = tuple(top.residues)
# produce graphs for individual residue pairs
for phi, psi, res1, res2 in zip(phis.T, psis.T, residues, residues[1:]):
for title, plotter in plotters.items():
plt.figure(0, facecolor="white", dpi=500)
plt.title(f"{title} [dihedral {res1}-{res2}]")
ax = plt.gca()
ax.set_aspect(1)
plotter(phi, psi)
plt.tight_layout()
plt.savefig(os.path.join(output_dir, f"{title}_{res1}_{res2}.png"))
plt.clf()
print(f"Saved {res1}-{res2} {title}")
# produce single plots for all residue pairs
for title, plotter in plotters.items():
plt.figure(0, facecolor="white", dpi=500)
plt.title(f"{title} [Entire Peptide]")
ax = plt.gca()
ax.set_aspect(1)
plotter(phis.reshape(-1), psis.reshape(-1))
plt.tight_layout()
plt.savefig(os.path.join(output_dir, f"{title}_entire_peptide.png"))
plt.clf()
print(f"Saved Entire Peptide {title}")
print("Done") | 2.203125 | 2 |
DATA_PROCESS/tiy_test.py | realmiya/scentsearcher | 0 | 12788274 | import pandas as pd
import numpy as np
import re
import math
import sys
def top_extract(s):
top = []
for i in range (1,len(s)+1):
if s[i-1].lower() == 'top':
top.append(s[i])
return top
def base_extract(s):
base = []
for i in range (1,len(s)+1):
if s[i-1].lower() == 'base':
base.append(s[i])
return base
def middle_extract(s):
middle = []
for i in range (1,len(s)+1):
if s[i-1].lower() == 'middle':
middle.append(s[i])
return middle
def note_extract(s):
result = []
location = ['top', 'middle', 'base']
for i in range (1,len(s)+1):
for ll in location:
if s[i-1].lower() == ll:
result.append(s[i])
return result
# Strip the leading tag from every note as well as the top/middle/base markers.
# If nothing is left afterwards, set the value to NaN.
def delete_note_tag(s):
s = re.split('-1|0|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20',s)[1]
if len(s)==0:
s = np.nan
return s
def notes_table(ori_data):
data = ori_data.loc[:, ['title', 'notes_1', 'notes_2', 'notes_3', 'notes_4', 'notes_5',
'notes_6', 'notes_7', 'notes_8', 'notes_9', 'notes_10',
'notes_11', 'notes_12', 'notes_13', 'notes_14', 'notes_15',
'notes_16', 'notes_17', 'notes_18', 'notes_19', 'notes_20']]
split_data = data
split_data.fillna('-1', inplace=True)
for i in range(1, 21):
split_data['notes_{}'.format(str(i))] = data['notes_{}'.format(str(i))].apply(
lambda s: delete_note_tag(s))
note_in_perfume = pd.DataFrame(columns=['perfume_name', 'note_name'])
rows, cols = data.shape
    # Process every note and record the note-to-perfume mapping
for row in range(0, rows):
cur_perfume = split_data['title'][row]
i = 1
while i < 21:
if pd.isnull(data['notes_{}'.format(str(i))][row]):
i = 21
else:
new = pd.DataFrame({'perfume_name': cur_perfume,
'note_name': data['notes_{}'.format(str(i))][row]}, index=[1])
note_in_perfume = note_in_perfume.append(new, ignore_index=True)
i += 1
    # Collect all notes into a set to build the note table
note_list = list(set(note_in_perfume['note_name'].tolist()))
note_table = pd.DataFrame(note_list, columns=['note_name'])
note_table.to_csv('nnnnew_note.csv', index=False)
note_in_perfume.to_csv('note_in_perfume.csv', index=False)
'''
data = ori_data['title']
for i in range(1, 21):
data['notes_{}'.format(str(i))] = data['notes_{}'.format(str(i))]
data = ori_data
split_data = data
split_data.fillna('-1', inplace=True)
for i in range(1, 21):
split_data['notes_{}'.format(str(i))] = data['notes_{}'.format(str(i))].apply(
lambda s: re.split('-1|0|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20', s))
notes = split_data['notes_1']
for i in range(2, 21):
notes = notes + split_data['notes_{}'.format(str(i))]
notes = notes.apply(lambda s: list(filter(lambda x: x != '', s)))
    # Extract all notes and merge them into a single column
test_notes = notes.apply(note_extract)
#top_notes = notes.apply(top_extract)
#middle_notes = notes.apply(middle_extract)
#base_notes = notes.apply(base_extract)
'''
return
def perfume_table(original_data):
rows, cols = original_data.shape
data = pd.DataFrame(columns=['title', 'brand', 'date', 'image', 'description', 'target'])
data['title'] = data['title'].astype(np.str)
data['brand'] = original_data['brand']
data['date'] = original_data['date']
data['image'] = data['image'].astype(np.str)
data['description'] = data['description'].astype(np.str)
data['target'] = 0
# perfume_name, brand, date, image, description, target
# 处理title 去掉所有的for women/men 对应到target里面
target_dict = {'for women': 0,
'for men': 1,
'for women and men': 2}
for r in range(0, rows):
item = original_data['title'][r]
if 'for men' in item:
tt = target_dict['for men']
title = item[0:(item.find('for') - 1)]
data.loc[r, 'title'] = title
elif 'for women' in item:
if 'for women and men' in item:
tt = target_dict['for women and men']
else:
tt = target_dict['for women']
title = item[0:(item.find('for') - 1)]
data.loc[r, 'title'] = title
else:
tt = 3
            data.loc[r, 'title'] = item  # no 'for ...' suffix: keep the full title as-is
data.loc[r, 'target'] = tt
data['target'] = data['target'].astype(dtype=int)
data.rename(columns={'title': 'perfume_name'}, inplace=True)
data.to_csv('nnnnew_perfume.csv', index = False)
return
# Convert the perfume CSV rows into SQL INSERT statements
def insert_perfume_data_into_sql():
pp_index = pd.read_csv('/Users/woody/UNSW-MIT/21T2-COMP9900/pp_index.csv')
pp_df = pp_index[['Unnamed: 0', 'perfume_name', 'brand', 'date', 'target']]
d = pp_df.values.tolist()
k_list = [0, 10000, 20000, 30000, 40000, 51212]
k = 0
while k in range(0, 5):
k_1 = k_list[k]
k_2 = k_list[k + 1]
result = 'INSERT INTO ttperfume(ttperfume_id, ttperfume_name, ttbrand, ttdate, tttarget) VALUES'
i = k_1
while i in range(k_1, k_2):
if pd.isna(d[i][1]):
d[i][1] = d[i][2]
if "'" in d[i][1]:
d[i][1] = d[i][1].replace("'", "''")
if "'" in d[i][2]:
d[i][2] = d[i][2].replace("'", "''")
if i != k_2 - 1:
dd = '(' + str(d[i][0]) + ", '" + str(d[i][1]) + "', '" + str(d[i][2]) + "', " + str(
d[i][3]) + ", " + str(d[i][4]) + '),'
else:
dd = '(' + str(d[i][0]) + ", '" + str(d[i][1]) + "', '" + str(d[i][2]) + "', " + str(
d[i][3]) + ", " + str(d[i][4]) + ');'
result = result + dd
i += 1
# result = result.replace('"',"'",10086)
name = 'ttttpp_index_' + str(k_1) + '_' + str(k_2) + 'k.txt'
fh = open(name, 'w')
fh.write(result)
fh.close()
k += 1
return
# Process note_in_perfume:
# deduplicate, then turn the CSV into SQL INSERT statement strings
def process_n_in_p():
note_df = pd.read_csv('/Users/woody/UNSW-MIT/21T2-COMP9900/note_index.csv')
nn = note_df.set_index('note_name')
note_dic = nn.to_dict()['Unnamed: 0']
# key: perfume_name value:perfume_id
pp1 = pd.read_csv('/Users/woody/UNSW-MIT/21T2-COMP9900/perfume_for_index.csv')
pp12 = pp1.set_index('title')
pp_dic = pp12.to_dict()['Unnamed: 0']
    # key: perfume_id, value: perfume_name (kept for sanity checking)
pp22 = pp1.set_index('Unnamed: 0')
p2_dic = pp22.to_dict()['title']
n_in_p = pd.read_csv('/Users/woody/UNSW-MIT/21T2-COMP9900/note_in_perfume.csv')
np_index = pd.DataFrame(columns=['perfume_id', 'note_id'])
for r in range(0, n_in_p.shape[0]):
pp = n_in_p['perfume_name'][r]
nn = n_in_p['note_name'][r]
pi = pp_dic[pp]
ni = note_dic[nn]
        # Important: first build a one-row DataFrame to append as the last row of the result frame
new = pd.DataFrame({'perfume_id': pi,
'note_id': ni},
                           index=[1])  # custom index 1; setting an index here is optional
np_index = np_index.append(new, ignore_index=True)
    # Save the perfume-note index as a CSV
np_index.to_csv('np_index.csv')
    # If the perfume_id is unchanged, keep appending note_ids to the same list.
    # When the perfume_id changes, deduplicate the previous list (via set) and write its rows out.
ex_p = np_index['perfume_id'][0]
ex_n = np_index['note_id'][0]
cur_pn_list = [ex_n]
nn_pp = pd.DataFrame(columns=['perfume_id', 'note_id'])
for r in range(1, np_index.shape[0]):
# for r in range(1,30):
cur_p = np_index['perfume_id'][r]
cur_n = np_index['note_id'][r]
if ex_p == cur_p:
cur_pn_list.append(cur_n)
else:
aset = list(set(cur_pn_list))
cur_pn_list = [cur_n]
# print(ex_p)
# print(aset)
for ni in aset:
new = pd.DataFrame({'perfume_id': ex_p, 'note_id': ni}, index=[1])
nn_pp = nn_pp.append(new, ignore_index=True)
ex_p = cur_p
nn_pp.to_csv('nn_pp.csv')
np_list = nn_pp.values.tolist()
for k1 in range(0, len(np_list), 50000):
k2 = k1 + 50000
result = 'INSERT INTO note_in_perfume(perfume_id, note_id) VALUES'
for i in range(k1, k2):
le = len(str(np_list[i]))
q = '(' + str(np_list[i])[1:le - 1] + ')'
result = result + q
if i != k2 - 1:
result = result + ','
else:
result = result + ';'
name = '50nip_' + str(k1 / 10000) + '_' + str(k2 / 10000) + 'w.txt'
fh = open(name, 'w')
fh.write(result)
fh.close()
k1 = 350000
k2 = len(np_list)
result = 'INSERT INTO note_in_perfume(perfume_id, note_id) VALUES'
for i in range(k1, k2):
le = len(str(np_list[i]))
q = '(' + str(np_list[i])[1:le - 1] + ')'
result = result + q
if i != k2 - 1:
result = result + ','
else:
result = result + ';'
name = '50nip_' + str(k1 / 10000) + '_' + str(k2 / 10000) + 'w.txt'
fh = open(name, 'w')
fh.write(result)
fh.close()
return
if __name__ == '__main__':
operation = sys.argv[1]
original_perfume = pd.read_csv('perfume.csv')
###########################
    # Build the notes table
if operation == 'notes_table':
notes_table(original_perfume)
if operation == 'perfume_table':
perfume_table(original_perfume)
| 2.828125 | 3 |
calipso/tools/tooltip.py | NASA-DEVELOP/vocal | 18 | 12788275 | ######################################
# tooltip.py
# @author: <NAME>
# 6/24/2015
######################################
from Tkinter import Toplevel, TclError, Label, LEFT, SOLID
class ToolTip(object):
"""
Displays text in a label below a passed widget
:param widget: The widget tooltip will be binding text to
"""
def __init__(self, widget):
self.widget = widget
self.tipWindow = None
self.x = self.y = 0
self.text = ''
# noinspection PyProtectedMember
def show_tip(self, text):
"""
Create and pack the tooltip, bound to the ``'<Enter>'`` event when
:py:func:`createToolTip` is called
:param str text: string to place inside label
"""
self.text = text
if self.tipWindow or not self.text:
return
x, y, cx, cy = self.widget.bbox('insert') # @UnusedVariable
# Larger button should have the tip placed lower
if self.widget.winfo_height() > 70:
x = x + self.widget.winfo_rootx() + 50
y = y + cy + self.widget.winfo_rooty() + 50
else:
x = x + self.widget.winfo_rootx() + 27
y = y + cy + self.widget.winfo_rooty() + 27
self.tipWindow = tw = Toplevel(self.widget)
tw.wm_overrideredirect(1)
tw.wm_geometry('+%d+%d' % (x, y))
try:
# For Mac OS
tw.tk.call('::Tk::unsupported::MacWindowStyle',
'style', tw._w,
'help', 'noActivates')
except TclError:
pass
label = Label(tw, text=self.text, justify=LEFT,
background='#ffffe0', relief=SOLID, borderwidth=1,
font=('tahoma', '8', 'normal'))
label.pack(ipadx=1)
def hide_tip(self):
"""
Hide or destroy the tool tip label when the mouse leaves widget.
Bound to the ``'<Leave>'`` event when :py:func:`createToolTip` is called
"""
tw = self.tipWindow
self.tipWindow = None
if tw:
tw.destroy()
def create_tool_tip(widget, text):
"""
Create an instance of :py:class:`ToolTip` and bind the ``'<Enter>'`` and
``'<Leave>'`` events for displaying to the widget passed
:param widget: the widget for the tooltip to be displayed below
:param str text: text contained in the tooltip
"""
tool_tip = ToolTip(widget)
# noinspection PyUnusedLocal
def enter(event):
tool_tip.show_tip(text)
# noinspection PyUnusedLocal
def leave(event):
tool_tip.hide_tip()
widget.bind('<Enter>', enter)
widget.bind('<Leave>', leave)
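# Minimal usage sketch (assumes a running Tk root; widget and text are illustrative):
#
#   from Tkinter import Tk, Button
#   root = Tk()
#   button = Button(root, text='Save')
#   button.pack()
#   create_tool_tip(button, 'Save the current session')
#   root.mainloop()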
| 3.625 | 4 |
external/kit_sch_ge/config.py | sjeknic/CellST | 0 | 12788276 | from pathlib import Path
def get_project_path():
return Path(__file__).parent
def get_data_path():
project_path = get_project_path()
parent_dir = project_path.parent
return parent_dir / 'data'
def get_results_path():
project_path = get_project_path()
parent_dir = project_path.parent
return parent_dir / 'results'
# get string path
def string_path(path_arg):
if not isinstance(path_arg, str):
if hasattr(path_arg, 'as_posix'):
path_arg = path_arg.as_posix()
else:
raise TypeError('Cannot convert variable to string path')
else:
path_arg = path_arg.replace('\\', '/')
return path_arg
image_formats = ('bmp', 'jpeg', 'tif', 'png', 'tiff')
| 2.84375 | 3 |
models/two_d/alexnet.py | lykzsm/Pytorch-Medical-Classification | 37 | 12788277 | <reponame>lykzsm/Pytorch-Medical-Classification
import torch.nn as nn
import torch.nn.functional as F
import torch
class AlexNet(nn.Module):
def __init__(self, num_classes=1000):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), 256 * 6 * 6)
x = self.classifier(x)
return x
def alexnet(num_classes):
return AlexNet(num_classes)
if __name__ == "__main__":
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
image_size = 224
x = torch.Tensor(1, 1, image_size, image_size)
x = x.to(device)
print("x size: {}".format(x.size()))
model = alexnet(num_classes=2).to(device)
out1 = model(x)
print("out size: {}".format(out1.size())) | 3 | 3 |
pdlearn/feature_selection/variance_threshold.py | lewisacidic/pandas-learn | 1 | 12788278 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of pandas-learn
# https://github.com/RichLewis42/pandas-learn
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT
# Copyright (c) 2015, <NAME> <<EMAIL>>
"""
pdlearn.feature_selection.variance_threshold
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Package adapting scikit-learn's variance threshold model.
"""
import sklearn.feature_selection
from ..adaptor import transformer, feature_property
# pylint: disable=C0111
@transformer
class VarianceThreshold(sklearn.feature_selection.VarianceThreshold):
variances_ = feature_property('variances')
@property
def target_names_(self):
""" Transformed features """
return self.feature_names_[self.variances_ > self.threshold]
| 3.09375 | 3 |
Conditional Statements/If.py | Adi-19/Python | 0 | 12788279 | <filename>Conditional Statements/If.py<gh_stars>0
# WAP to show the use of If statment
phone_balance = int(input("enter the balence"))
bank_balance = 100
print(phone_balance, bank_balance)
if phone_balance < 5:
phone_balance += 10
bank_balance -= 10
print(phone_balance, bank_balance)
# The lines that increment phone_balance and decrement bank_balance only execute
# if it is true that phone_balance is less than 5. If not, the code in this if block is simply skipped.
# In other words, if the phone balance is below 5, a top-up of 10 is added to it and subtracted from the bank balance.
| 3.9375 | 4 |
pystorm/hal/neuromorph/graph/graph_object.py | Stanford-BIS/pystorm | 3 | 12788280 | from abc import ABC, abstractmethod
import logging

logger = logging.getLogger(__name__)


# GraphObject-specific exceptions
class GraphObjectError(Exception):
"""Base class for GraphObject Exceptions"""
name = "graph_object.py"
class ConnectionTypeError(GraphObjectError):
"""Exception to raise when connected GraphObject types are incompatible"""
def __init__(self, input_obj, output_obj):
self.input_obj = input_obj
self.output_obj = output_obj
self.message = (
str(type(self.input_obj)) + " can't be connected to " +
str(type(self.output_obj)) + ".\n" + "tried to connect" +
self.input_obj.get_label() + " to " + self.output_obj.get_label())
def __str__(self):
return self.message
class FanoutError(GraphObjectError):
"""Exception to raise when a GraphObject's fanout is too large"""
def __init__(self, obj, max_fanout=1):
self.obj = obj
self.fanout = len(obj.out_conns)
self.max_fanout = max_fanout
self.message = (
self.obj.get_label() + " of type " + str(type(self.obj)) +
" can only have max fanout of " + str(self.max_fanout) +
" but tried making fanout of " + str(self.fanout))
def __str__(self):
return self.message
class GraphObject(ABC):
def __init__(self, label):
self.label = label
# Dictionary keys are of the form (resource_name, target_graph_object).
# Resource_name is a string, and typically refers to the string __name__ of the stored Resource.
# target_graph_object is a GraphObject or None. When the stored resource is intrinsic, or isn't
# associated with any particular connection (e.g. TATFanout for a fanout to many outputs),
# target_graph_object is None, otherwise, it's the GraphObject of the target connected to by the Resource
self.resources = {}
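        # e.g. self.resources[("TATFanout", None)] for a resource not tied to one connection,
        # or self.resources[("SomeResource", target_obj)] for a per-connection resource
        # (key names here are illustrative)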
# outgoing connections made from this GraphObject
self.out_conns = []
@abstractmethod
def __repr__(self):
pass
def __gt__(self, other_obj):
return self.label > other_obj.label
def get_label(self):
return self.label
def reinit_resources(self):
self.resources = {}
def __check_key(self, key):
"""Puts key to self.resources into proper form
parameters:
key: string or (string, GraphObject)
string is converted to (string, None) (for intrinsic resources)
returns:
(string, GraphObject)
"""
if isinstance(key, str):
key = (key, None)
elif isinstance(key, tuple) and len(key) == 2 and \
isinstance(key[0], str) and \
isinstance(key[1], GraphObject):
pass
else:
raise TypeError("key for _append_resource must be str or (str, GraphObject)")
return key
# we want to ensure we don't clobber any resources
def _append_resource(self, key, resource):
key = self.__check_key(key)
if key in self.resources:
logger.critical("tried to add the same resource key twice")
logger.critical(" to {} {}".format(self.label, self))
assert(False)
self.resources[key] = resource
def _get_resource(self, key):
key = self.__check_key(key)
return self.resources[key]
@abstractmethod
def create_intrinsic_resources(self):
pass
@abstractmethod
def _connect_from(self, src, src_resource_key, conn):
pass
@abstractmethod
def create_connection_resources(self):
pass
def _get_single_conn_out(self):
if len(self.out_conns) > 1:
raise FanoutError(self)
conn = self.out_conns[0]
tgt = conn.dest
return conn, tgt
def _check_conn_from_type(self, src, allowed_types):
if type(src).__name__ not in allowed_types:
raise ConnectionTypeError(src, self)
| 3.328125 | 3 |
Python/questions/BinaryTreePreorderTraversal/binary-tree-preorder-traversal-solution-2.py | udcymen/leetcode | 0 | 12788281 | <gh_stars>0
class TreeNode:
def __init__(self, val:int = 0, left:'TreeNode' = None, right:'TreeNode' = None):
self.val = val
self.left = left
self.right = right
# Solution 2: Iterative
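# Walk down the left spine emitting values as we go, stacking right children to visit later.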
def preorderTraversal(root: TreeNode) -> list[int]:
result = []
if not root:
return result
stack = [root]
while stack:
node = stack.pop()
while node:
result.append(node.val)
if node.right:
stack.append(node.right)
node = node.left
return result
if __name__ == "__main__":
root = TreeNode(1)
root.right = TreeNode(2)
root.right.left = TreeNode(3)
print(preorderTraversal(root)) | 3.859375 | 4 |
module/invite.py | lolotree11/pyscord | 14 | 12788282 | <filename>module/invite.py<gh_stars>10-100
import channels
import client
import general
import guild
import invite
import listener
import messages
import user
class Invite:
def __init__(self, invite, basic_header, api_version=9):
self.basic_header = basic_header
self.api_version = api_version
self.raw = invite
self.invite = invite
self.code = general.essai_element(invite, "code")
self.guild = guild.Guild(general.essai_element(invite, "guild"), api_version=self.api_version, basic_header=self.basic_header)
self.channel = channels.Channel(general.essai_element(invite, "channel"), api_version=self.api_version, basic_header=self.basic_header)
self.inviter = user.User(general.essai_element(invite, "inviter"), api_version=self.api_version, basic_header=self.basic_header)
self.target_type = general.essai_element(invite, "target_type")
self.target_user = user.User(general.essai_element(invite, "target_user"), api_version=self.api_version, basic_header=self.basic_header)
self.target_application = general.essai_element(invite, "target_application")
self.approximate_presence_count = general.essai_element(invite, "approximate_presence_count")
self.approximate_member_count = general.essai_element(invite, "approximate_member_count")
self.expires_at = general.essai_element(invite, "expires_at")
self.stage_instance = general.essai_element(invite, "stage_instance") | 2.3125 | 2 |
flex/ussd/screens/metadata.py | centergy/flex_ussd | 0 | 12788283 | <reponame>centergy/flex_ussd
import re
from flex.utils import text
from flex.utils.void import Void
from flex.utils.decorators import export
from flex.utils.metadata import BaseMetadata, metafield, get_metadata_class
from .. import exc
from ..namespaces import module_ussd_namespace, isvalid_namespace_name
__all__ = [
'metafield', 'get_metadata_class',
]
@export
class ScreenMetadata(BaseMetadata):
abstract = metafield(default=False)
@property
def screen(self):
return self.target
@metafield()
def label(self, value):
return value or text.startcase(str(self.name).rpartition('.')[2])
@metafield()
def description(self, value):
return value or self.target.__doc__
@metafield()
def name(self, value):
if self.abstract:
return value
value = value or text.snake(self.target.__name__)
if not isvalid_namespace_name(value, split=False):
raise exc.ScreenMetadataError(
'Invalid UssdScreen name <%s.%s(name="%s")>.'\
% (self.target.__module__, self.target.__name__, value)
)
return '%s.%s' % (self.namespace, value,) if self.namespace else value
@metafield(default=Void)
def namespace(self, value):
if self.abstract:
return None
if value is Void:
try:
value = module_ussd_namespace(self.target.__module__)
except exc.UssdNamespaceError as e:
raise exc.ScreenMetadataError(
'Invalid UssdScreen namespace %s in <%s.%s>.'\
% (value, self.target.__module__, self.target.__name__)
) from e
elif value is not None and not isvalid_namespace_name(value):
raise exc.ScreenMetadataError(
'Invalid UssdScreen namespace %s in <%s.%s>.'\
% (value, self.target.__module__, self.target.__name__)
)
return value
@metafield()
def state_attributes(self, value):
return set()
| 2.0625 | 2 |
tests/test_router.py | Bilonan/django-binder | 14 | 12788284 | <reponame>Bilonan/django-binder
from django.test import TestCase
from binder.exceptions import BinderNotFound
from binder.json import jsondumps
from binder.models import BinderModel
from binder.router import Router, Route, detail_route
from binder.views import ModelView
from django.urls.base import is_valid_path, clear_url_caches
from django.conf.urls import url, include
from . import urls_module
# Two unique local models, to use for view registration
from .testapp.models import Country
class FooModel(BinderModel):
class Meta(BinderModel.Meta):
app_label = 'test'
class BarModel(BinderModel):
class Meta(BinderModel.Meta):
app_label = 'test'
class RouterTest(TestCase):
def tearDown(self):
# Without this, tests can influence one another!
clear_url_caches()
def test_double_model_registration_triggers_error(self):
class ParentView(ModelView):
pass
class FooView1(ParentView):
model = FooModel
class FooView2(ParentView):
model = FooModel
with self.assertRaises(ValueError):
Router().register(ParentView)
def test_double_route_registration_triggers_error(self):
class ParentView(ModelView):
pass
class FooView(ParentView):
model = FooModel
route = 'myroute'
class BarView(ParentView):
model = BarModel
route = 'myroute'
with self.assertRaises(ValueError):
Router().register(ParentView)
def test_register_adds_default_routes_from_modelname(self):
class ParentView(ModelView):
pass
class FooView(ParentView):
model = FooModel
class BarView(ParentView):
model = BarModel
r = Router()
r.register(ParentView)
urls_module.urlpatterns = [url(r'^', include(r.urls))]
self.assertTrue(is_valid_path('/foo_model/', urls_module))
self.assertTrue(is_valid_path('/foo_model/1/', urls_module))
self.assertTrue(is_valid_path('/bar_model/12345/', urls_module))
self.assertFalse(is_valid_path('/bar_model/lalala/', urls_module))
self.assertFalse(is_valid_path('/another_model/', urls_module))
def test_register_adds_custom_route_names(self):
class ParentView(ModelView):
pass
class FooView(ParentView):
model = FooModel
route = 'foo'
class BarView(ParentView):
model = BarModel
# Explicit Route objects should also be accepted
route = Route('bar')
r = Router()
r.register(ParentView)
urls_module.urlpatterns = [url(r'^', include(r.urls))]
self.assertTrue(is_valid_path('/foo/', urls_module))
self.assertTrue(is_valid_path('/foo/1/', urls_module))
self.assertTrue(is_valid_path('/bar/12345/', urls_module))
# Default named routes should not be there
self.assertFalse(is_valid_path('/foo_model/1/', urls_module))
self.assertFalse(is_valid_path('/bar_model/1/', urls_module))
def test_register_obeys_custom_route_config(self):
class ParentView(ModelView):
pass
class FooView(ParentView):
model = FooModel
route = Route('foo', list_endpoint=False)
class BarView(ParentView):
model = BarModel
route = Route('bar', detail_endpoint=False)
r = Router()
r.register(ParentView)
urls_module.urlpatterns = [url(r'^', include(r.urls))]
self.assertFalse(is_valid_path('/foo/', urls_module))
self.assertTrue(is_valid_path('/foo/1/', urls_module))
self.assertTrue(is_valid_path('/bar/', urls_module))
self.assertFalse(is_valid_path('/bar/1/', urls_module))
class TestFetchObj(TestCase):
def test_get_obj_turns_pk_in_object(self):
that = self
country = Country.objects.create(name='foo')
class RequestMock:
method='GET'
class Foo(ModelView):
model = Country
@detail_route('foo', methods=['GET'], fetch_obj=True, unauthenticated=True)
def foo(self, request, obj):
that.assertTrue(isinstance(obj, Country))
that.assertEqual(country.pk, obj.pk)
return jsondumps({})
Foo().foo(RequestMock(), country.pk)
def test_get_obj_turns_kwarg_pk_in_object(self):
that = self
country = Country.objects.create(name='foo')
class RequestMock:
method = 'GET'
class Foo(ModelView):
model = Country
@detail_route('foo', methods=['GET'], fetch_obj=True, unauthenticated=True)
def foo(self, request, obj):
that.assertTrue(isinstance(obj, Country))
that.assertEqual(country.pk, obj.pk)
return jsondumps({})
Foo().foo(RequestMock(), pk=country.pk)
def test_get_obj_raises_binder_not_exists_error(self):
class RequestMock:
method='GET'
class Foo(ModelView):
model = Country
@detail_route('foo', methods=['GET'], fetch_obj=True, unauthenticated=True)
def foo(self, request, obj):
return jsondumps({})
with self.assertRaises(BinderNotFound):
Foo().foo(RequestMock(), 5)
| 2.234375 | 2 |
passenger_wsgi.py | gijzelaerr/workflow-service | 32 | 12788285 | <filename>passenger_wsgi.py
from wes_service.wes_service_main import setup
application = setup()
| 0.957031 | 1 |
examples/helloworld.py | JettHu/mongo-orm | 0 | 12788286 | <reponame>JettHu/mongo-orm
from mongo_orm import Model
from mongo_orm import StringField
from mongo_orm import CommonField
class User(Model):
name = StringField('user_name', type_check=True)
test_field = CommonField('test', default=-9999)
| 2.1875 | 2 |
checks_available/check_solr.py | oddeyeco/oe-agent3 | 5 | 12788287 | <reponame>oddeyeco/oe-agent3
import lib.record_rate
import lib.puylogger
import lib.record_rate
import lib.getconfig
import lib.commonclient
import lib.basecheck
import json
solr_url = lib.getconfig.getparam('Solr', 'stats')
check_type = 'solr'
class Check(lib.basecheck.CheckBase):
def precheck(self):
try:
stats_json = json.loads(lib.commonclient.httpget(__name__, solr_url))
requests = ('delete', 'get', 'head', 'move', 'options', 'other', 'put', 'trace')
responses = ('1xx', '2xx', '3xx', '4xx', '5xx')
heapstats = ('committed', 'init', 'max', 'used')
sothreads = ('threads.count', 'threads.daemon.count')
garbage = ('gc.ConcurrentMarkSweep.count', 'gc.ConcurrentMarkSweep.time', 'gc.ParNew.count', 'gc.ParNew.time',
'gc.G1-Old-Generation.count','gc.G1-Old-Generation.time' ,'gc.G1-Young-Generation.count', 'gc.G1-Young-Generation.time')
for rqst in requests:
rqst_name = 'org.eclipse.jetty.server.handler.DefaultHandler.' + rqst + '-requests'
rqvalue= stats_json['metrics']['solr.jetty'][rqst_name]['count']
csrate = self.rate.record_value_rate('slr_'+rqst, rqvalue, self.timestamp)
self.local_vars.append({'name': 'solr_' + rqst + '_requests', 'timestamp': self.timestamp, 'value': csrate, 'check_type': check_type, 'chart_type': 'Rate'})
total_requests = 'org.eclipse.jetty.server.handler.DefaultHandler.requests'
trv = stats_json['metrics']['solr.jetty'][total_requests]['count']
rqrate = self.rate.record_value_rate('slr_total_requests', trv, self.timestamp)
self.local_vars.append({'name': 'solr_requests_all', 'timestamp': self.timestamp, 'value': rqrate, 'check_type': check_type, 'chart_type': 'Rate'})
for resp in responses:
resp_name = 'org.eclipse.jetty.server.handler.DefaultHandler.' + resp + '-responses'
csvalue = stats_json['metrics']['solr.jetty'][resp_name]['count']
csrate = self.rate.record_value_rate('slr_'+resp, csvalue, self.timestamp)
self.local_vars.append({'name': 'solr_' + resp + '_responses', 'timestamp': self.timestamp, 'value': csrate, 'check_type': check_type, 'chart_type': 'Rate'})
for hu in heapstats:
hu_name = 'memory.heap.' + hu
huvalue = stats_json['metrics']['solr.jvm'][hu_name]
self.local_vars.append({'name': 'solr_heap_' + hu, 'timestamp': self.timestamp, 'value': huvalue, 'check_type': check_type})
for nohu in heapstats:
nohu_name = 'memory.non-heap.' + nohu
nohuvalue = stats_json['metrics']['solr.jvm'][nohu_name]
self.local_vars.append({'name': 'solr_non_heap_' + nohu, 'timestamp': self.timestamp, 'value': nohuvalue, 'check_type': check_type})
for tr in sothreads:
trvalue = stats_json['metrics']['solr.jvm'][tr]
self.local_vars.append({'name': 'solr_' + tr.replace('.', '_').replace('_count', ''), 'timestamp': self.timestamp, 'value': trvalue, 'check_type': check_type})
for gc in garbage:
if gc in stats_json['metrics']['solr.jvm']:
gcvalue = stats_json['metrics']['solr.jvm'][gc]
self.local_vars.append({'name': 'solr_' + gc.replace('.', '_').replace('ConcurrentMarkSweep', 'CMS').lower(), 'timestamp': self.timestamp, 'value': gcvalue, 'check_type': check_type})
except Exception as e:
lib.puylogger.print_message(__name__ + ' Error : ' + str(e))
pass
| 2.078125 | 2 |
2020/day/16/tickets.py | mboos/advent-of-code | 0 | 12788288 | <gh_stars>0
# Lint as: python3
"""
Solution to https://adventofcode.com/2020/day/16
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from dataclasses import dataclass
import re
FLAGS = flags.FLAGS
flags.DEFINE_string("input", None, "Input file.")
flags.mark_flag_as_required("input")
rule_pattern = re.compile(r'(?P<name>.+): (?P<min1>\d+)-(?P<max1>\d+) or (?P<min2>\d+)-(?P<max2>\d+)')
@dataclass(frozen=True)
class Rule:
name: str
min1: int
max1: int
min2: int
max2: int
def validate_value(self, value: int) -> bool:
return (self.min1 <= value and value <= self.max1) or (self.min2 <= value and value <= self.max2)
def validate_ticket(self, values: [int]) -> bool:
return any(map(self.validate_value, values))
def read_ticket(line):
return list(map(int, line.split(',')))
def main(argv):
if len(argv) > 2:
raise app.UsageError('Too many command-line arguments.')
read_section = 'rules'
rules = []
your_ticket = None
nearby_tickets = []
with open(FLAGS.input) as fp:
for line in fp:
if read_section == 'rules':
if 'your ticket:' in line:
read_section = 'your ticket'
continue
match = rule_pattern.match(line)
if match:
rules.append(
Rule(
match.group('name'),
int(match.group('min1')),
int(match.group('max1')),
int(match.group('min2')),
int(match.group('max2')),
))
elif read_section == 'your ticket':
if 'nearby tickets:' in line:
read_section = 'nearby tickets'
continue
if line.strip():
your_ticket = read_ticket(line)
elif read_section == 'nearby tickets':
nearby_tickets.append(read_ticket(line))
error_rate = 0
good_tickets = []
for ticket in nearby_tickets:
good = True
for value in ticket:
if not any(map(lambda r: r.validate_value(value), rules)):
error_rate += value
good = False
if good:
good_tickets.append(ticket)
print(f'Ticket scanning error rate: {error_rate}')
candidates = [set(rules) for r in rules]
for ticket in good_tickets:
for value, column_candidates in zip(ticket, candidates):
for rule in tuple(column_candidates):
if not rule.validate_value(value):
column_candidates.remove(rule)
# Like a sudoku, remove remaining candidates by process of elimination
while not all(len(c) == 1 for c in candidates):
for c in candidates:
if len(c) == 1:
rule = tuple(c)[0]
for other in candidates:
if other != c:
other.discard(rule)
candidates = [c.pop() for c in candidates]
departure_values = 1
for value, rule in zip(your_ticket, candidates):
if rule.name.startswith('departure'):
departure_values *= value
print(f'Product of departure values {departure_values}')
if __name__ == '__main__':
app.run(main)
| 2.875 | 3 |
boa-nimbus/lambda/UserUpdateHandlerFunction/index.py | moduspwnens/boa-chat | 12 | 12788289 | <filename>boa-nimbus/lambda/UserUpdateHandlerFunction/index.py<gh_stars>10-100
"""UserUpdateHandlerFunction
Updates a user's profile attributes.
"""
from __future__ import print_function
import os
import json
import boto3
import botocore
from apigateway_helpers.exception import APIGatewayException
from apigateway_helpers.headers import get_response_headers
cognito_idp_client = boto3.client("cognito-idp")
def lambda_handler(event, context):
print("Event: {}".format(json.dumps(event)))
if "warming" in event and "{}".format(event["warming"]).lower() == "true":
return {
"message": "Warmed!"
}
event["request-body"] = json.loads(event.get("body", "{}"))
new_email_address = event["request-body"].get("email-address", "")
if new_email_address == "":
raise APIGatewayException("Value for \"email-address\" must be specified in request body.", 400)
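    # The parsing below assumes the usual API Gateway format of
    # cognitoAuthenticationProvider (stated here as an assumption, not taken
    # from documentation embedded in this repo), roughly:
    #   "cognito-idp.<region>.amazonaws.com/<pool-id>,
    #    cognito-idp.<region>.amazonaws.com/<pool-id>:CognitoSignIn:<sub>"
    # so element 0 of the comma split names the user pool and the last colon
    # field of element 1 is the user's sub.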
cognito_auth_provider_string = event["requestContext"]["identity"]["cognitoAuthenticationProvider"]
cognito_idp_name = cognito_auth_provider_string.split(",")[0]
user_pool_id = "/".join(cognito_idp_name.split("/")[1:])
cognito_user_pool_sub_value = cognito_auth_provider_string.split(",")[1].split(":")[2]
response = cognito_idp_client.list_users(
UserPoolId = user_pool_id,
AttributesToGet = [],
Filter = "sub = \"{}\"".format(cognito_user_pool_sub_value),
Limit = 1
)
cognito_user_pool_username = response["Users"][0]["Username"]
cognito_idp_client.admin_update_user_attributes(
UserPoolId = user_pool_id,
Username = cognito_user_pool_username,
UserAttributes = [
{
"Name": "email",
"Value": new_email_address
}
]
)
return {
"registration-id": cognito_user_pool_username,
"message": "E-mail address verification message sent."
}
def proxy_lambda_handler(event, context):
response_headers = get_response_headers(event, context)
try:
return_dict = lambda_handler(event, context)
except APIGatewayException as e:
return {
"statusCode": e.http_status_code,
"headers": response_headers,
"body": json.dumps({
"message": e.http_status_message
})
}
return {
"statusCode": 200,
"headers": response_headers,
"body": json.dumps(return_dict)
} | 2.328125 | 2 |
listings/middleware.py | darkismus/kompassi | 13 | 12788290 | <gh_stars>10-100
from django.conf import settings
from django.utils.deprecation import MiddlewareMixin
class ListingsMiddleware(MiddlewareMixin):
def process_request(self, request):
hostname = request.META['HTTP_HOST']
if hostname in settings.KOMPASSI_LISTING_URLCONFS:
request.urlconf = settings.KOMPASSI_LISTING_URLCONFS[hostname]
return None
| 1.828125 | 2 |
attic/shell/cmds/token.py | glycerine/pyg | 76 | 12788291 | # Copyright 2011 <NAME>. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import sh
def token(*args):
for arg in args:
print "%s -> %s" % (arg, sh.tokenise(arg))
| 2.484375 | 2 |
src/verifier.py | athalonis/CCL-Verification-Environment | 2 | 12788292 | #!/usr/bin/env python
#Copyright (c) 2014, <NAME> <<EMAIL>>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
ccl_c_dir="ccl_single_pass_c_code/"
import subprocess
import signal
import glob
import os
import re
import numpy as np
import threading
from PIL import Image
from cola import ComponentLabeling as cola
from converter import Img_conv as Img_conv
class CompareCP:
def get_error_cnt (self):
return self.__error_cnt
def get_report (self):
return self.__report
def __init__(self, timeout, max_x, max_y, file_show=None):
self.__timeout = timeout
self.__report = ""
self.__error_cnt = 0
self.__c_box = []
# convert images
try:
os.mkdir(ccl_c_dir + "/img");
except:
None
for files in glob.glob(ccl_c_dir + "/img/*.pbm"):
os.remove(files)
destreg = re.compile(r".*/(.*)$")
file_chk=""
file_cnt=0
if file_show is None:
for files in glob.glob("../img/*.pbm"):
if files != "../img/sim_in.pbm" and files != "../img\\sim_in.pbm":
img = Img_conv(files, max_x, max_y, 0.5)
m = destreg.match(files)
if m.group(1) is not None:
file_cnt+=1
file_chk+="img/" + m.group(1) + "\n"
img.save(ccl_c_dir + "/img/" + m.group(1))
else:
img = Img_conv(file_show, max_x, max_y, 0.5)
m = destreg.match(file_show)
if m.group(1) is not None:
file_cnt+=1
file_chk+="img/" + m.group(1) + "\n"
img.save(ccl_c_dir + "/img/" + m.group(1))
f = open(ccl_c_dir + "/test_batch_01.txt", "w")
f.write(str(file_cnt) + "\n" + file_chk)
f.close()
del f
self.get_c_box()
if file_show is None:
for files in glob.glob("../img/*.pbm"):
if files != "../img/sim_in.pbm" and files != "../img\\sim_in.pbm":
file_cnt+=1
pycola = cola(files, max_x=max_x, max_y=max_y);
self.chk_file(files, pycola)
del pycola
else:
pycola = cola(file_show, max_x=max_x, max_y=max_y);
c_boxes=self.chk_file(file_show, pycola)
print((str(c_boxes)))
pycola.plot_sp_add('Boxes C', None, c_boxes)
def chk_file(self, files, pycola):
self.__report += "Check file: " + files + "\n"
py_boxes = pycola.get_boxes().copy()
c_boxes = {}
box_cnt = 0
for b in py_boxes:
((py_start_x, py_start_y), (py_end_x, py_end_y)) = py_boxes[b]
found = False
for bc in self.__c_box:
(stim_file, c_start_y, c_start_x, c_end_y, c_end_x) = bc
c_end_x -= 1
c_end_y -= 1
c_boxes[str(c_start_x) + str(c_start_y) + str(c_end_x) + str(c_end_y)] = ((c_start_x, c_start_y), (c_end_x, c_end_y))
box_cnt += 1
if stim_file == files[3:] and py_start_x == c_start_x and py_start_y == c_start_y and py_end_x == c_end_x and py_end_y == c_end_y:
found = True
self.__c_box.remove(bc)
break
if not found:
self.__report += "\033[91mError\033[0m" + " Python Box: ((" + str(py_start_x)
self.__report += ", " + str(py_start_y) + "), (" + str(py_end_x) + ", " + str(py_end_y) + ")"
self.__report += " not in C implementation\n"
self.__error_cnt += 1
for bc in self.__c_box:
(stim_file, c_start_y, c_start_x, c_end_y, c_end_x) = bc
c_end_x -= 1
c_end_y -= 1
if stim_file == files[3:]:
self.__report += "\033[91mError\033[0m" + " C Box: ((" + str(c_start_x)
self.__report += ", " + str(c_start_y) + "), (" + str(c_end_x) + ", " + str(c_end_y) + ")"
self.__report += " not in Python implementation\n"
self.__error_cnt += 1
del pycola
return c_boxes
def get_c_box(self):
c_box = C_parser()
c_box.start()
while not c_box.done:
c_box.event.wait(self.__timeout)
if not c_box.event.is_set():
break;
if not c_box.done:
            self.__report += "\033[91mError\033[0m" + " Verification with C Code timed out\n"
self.__error_cnt += 1
else:
self.__c_box = c_box.getMessages()
del c_box
class CompareF:
def get_py_lable (self):
return self.__py_lable
def get_hdl_lable (self):
return self.__hdl_lable
def get_hdl_boxes (self):
return self.__hdl_boxes
def get_error_cnt (self):
return self.__error_cnt
def get_report (self):
return self.__report
def get_pycola(self):
return self.__pycola
def __init__(self, stim_file, passone, timeout, wdir, hdl_file, box_only,
resolution, max_x, max_y, continuous, run_only=False):
self.__timeout = timeout
self.__wdir = wdir
self.__max_x__ = max_x
self.__max_y__ = max_y
self.__passone__ = passone
self.__continuous__ = continuous
self.__resolution__ = resolution
self.__hdl_file__ = hdl_file
        self.__stim_file__ = stim_file
        self.__box_only__ = box_only
self.__regmeta = re.compile(r".*metavalue detected.*")
self.__py_colas = {}
self.__py_lables = {}
self.__hdl_lables = {}
self.__px_boxes = {}
self.__hdl_boxes = {}
self.__report = ""
self.__error_cnt=0
if not run_only:
self.__prepare__()
else:
#write stimulus file
j = Image.fromarray(self.__stim_file__.astype(np.uint8))
j.mode = "1";
j.save("../img/sim_in.pbm")
del j
def __prepare__(self):
from cola import ComponentLabeling as cola
self.__pycola = cola(self.__stim_file__, max_x=self.__max_x__,
max_y=self.__max_y__);
#labels of first pass
if self.__passone__:
self.__py_lable = self.__pycola.get_lable_f()
else:
self.__py_lable = self.__pycola.get_lable_s()
#generate empty array to store results of vhdl output
self.__hdl_lable = -1*np.ones(self.__py_lable.shape, dtype=np.int)
if not self.__continuous__:
self.__py_colas[self.__stim_file__] = self.__pycola
self.__py_lables[self.__stim_file__] = self.__py_lable
self.__hdl_lables[self.__stim_file__] = self.__hdl_lable
#write test image file for vhdl
j = Image.fromarray(self.__pycola.get_img().astype(np.uint8))
j.mode = "1";
j.save("../img/sim_in.pbm")
del j
#if stim_file != "../img/sim_in.pbm":
# shutil.copy(stim_file, "../img/sim_in.pbm")
        if not self.__box_only__:
self.verify_labels(self.__hdl_file__, self.__stim_file__,
self.__resolution__, self.__continuous__)
if not self.__passone__:
if self.__hdl_file__ == "tb_labeling":
self.run_boxes("tb_labeling_box", self.__stim_file__,
self.__resolution__, self.__continuous__)
elif self.__hdl_file__ == "tb_labeling_cont":
self.run_boxes("tb_labeling_box_cont", self.__stim_file__,
self.__resolution__, self.__continuous__)
else:
self.run_boxes(self.__hdl_file__, self.__stim_file__,
self.__resolution__, self.__continuous__)
def verify_labels(self, hdl_file, stim_file, resolution="ns", continuous=False):
vsim = VSIM_parser(hdl_file, "vhdl/", resolution)
vsim.start()
#compile some regex pattern
if continuous:
regline = re.compile(r"File: '([^']+)' Label: ([0-9]+).*")
else:
regline = re.compile(r"(Label:) ([0-9]+).*")
# index of picture
pos_x=0
pos_y=0
while not vsim.done:
vsim.event.wait(self.__timeout)
if not vsim.event.is_set():
break;
messages = vsim.getMessages()
for message in messages:
(time, severity, text) = message
if severity == "Note":
res = regline.match(text)
if res is None:
print(("unparsed text: " + text))
elif res.group(2) is not None:
label = int(res.group(2))
if continuous:
img_file = res.group(1)[3:]
stim_file = img_file
if img_file not in self.__py_lables:
pos_x = 0
pos_y = 0
self.__py_colas[img_file] = cola(stim_file, max_x=self.__max_x__, max_y=self.__max_y__);
self.__py_lables[img_file] = self.__py_colas[img_file].get_lable_s()
self.__hdl_lables[img_file] = -1*np.ones(self.__py_lables[img_file].shape, dtype=np.int)
if pos_y >= len(self.__py_lables[stim_file]):
self.__report += stim_file + ": additional pixel (x=" + str(pos_x) +", y=" + str(pos_y) +")\n"
self.__error_cnt += 1
else:
self.__hdl_lables[stim_file][pos_y][pos_x] = label
if self.__py_lables[stim_file][pos_y][pos_x] != label:
self.__report += ("\033[91mError\033[0m" + " File: "+ stim_file +" at pixel x=" + str(pos_x) + " y=" +
str(pos_y) + " expected: " + str(self.__py_lables[stim_file][pos_y][pos_x]) + " vhdl: " +
str(label) + " at time: " + str(time) + "\n")
self.__error_cnt += 1
pos_x = pos_x + 1
if pos_x == len(self.__py_lable[0]):
pos_y = pos_y + 1
pos_x = 0
elif res.group(2) is not None:
self.__report = "\033[91mError\033[0m" + "Unknown Message: " + text + "\n"
else:
metaval = self.__regmeta.match(text)
if not(severity == "Warning" and metaval is not None):
self.__report += severity + " " + text + "\n"
if severity != "Note" and severity != "Warning":
#self.__error_cnt += 1
None
                        #TODO report this separately
if not vsim.done:
self.__report = self.__report + stim_file + ": Output of data reached timeout in 2-pass simulation. Simulation abort\n"
self.__error_cnt += 1
for files in self.__py_lables:
if len(self.__py_lables[files][0]) > pos_y and pos_x != 0:
self.__report = self.__report + files + ": Not all pixels processed. First unprocessed pixel: x=" + str(pos_x+1) + " y=" + str(pos_y+1) + "\n"
self.__error_cnt += 1
del vsim
def run_boxes(self, hdl_file, stim_file, resolution="ns",
continuous=False, compare=True):
vsim = VSIM_parser(hdl_file, self.__wdir, resolution)
vsim.start()
if continuous:
regline = re.compile(r"File: '([^']+)' Box: \(([0-9]+), ([0-9]+)\), \(([0-9]+), ([0-9]+)\).*|Box: (error).*")
else:
regline = re.compile(r"(Box): \(([0-9]+), ([0-9]+)\), \(([0-9]+), ([0-9]+)\).*|Box: (error).*")
cnt={}
if (stim_file not in self.__px_boxes) and compare:
self.__px_boxes[stim_file] = self.__py_colas[stim_file].get_boxes().copy()
self.__hdl_boxes[stim_file] = {}
cnt[stim_file] = 0
elif not compare:
self.__hdl_boxes[stim_file] = {}
cnt[stim_file] = 0
while not vsim.done:
vsim.event.wait(self.__timeout)
if not vsim.event.is_set():
break;
messages = vsim.getMessages()
for message in messages:
(time, severity, text) = message
#print ("test:" + str(message))
if severity == "Note":
res = regline.match(text)
if res is None:
print(("unparsed text: \""+text+ "\""))
elif res.group(6) is not None:
self.__error_cnt += 1
self.__report = "Recognised error with to small heap\n"
elif res.group(2) is not None:
img_file = res.group(1)[3:]
if continuous:
self.__px_boxes[img_file] = self.__px_boxes[stim_file]
self.__hdl_boxes[img_file] = self.__hdl_boxes[stim_file]
cnt[stim_file] = cnt[img_file]
stim_file = img_file
start_x = int(res.group(2))
start_y = int(res.group(3))
end_x = int(res.group(4))
end_y = int(res.group(5))
self.__hdl_boxes[stim_file][cnt[stim_file]] = ((start_x, start_y), (end_x, end_y))
cnt[stim_file] += 1
if compare:
found = False
for b in self.__px_boxes[stim_file]:
((py_start_x, py_start_y), (py_end_x, py_end_y)) = self.__px_boxes[stim_file][b]
if py_start_x == start_x and py_start_y == start_y and py_end_x == end_x and py_end_y == end_y:
found = True
del self.__px_boxes[stim_file][b]
break
if not found:
self.__report += "\033[91mError\033[0m" + " File: '" + stim_file
self.__report += "' VHDL found box ((" + str(start_x) + ", "
self.__report += str(start_y) + "), (" + str(end_x) + ", "
self.__report += str(end_y) + ")) but python not\n"
self.__error_cnt += 1
elif res.group(3) is not None:
self.__report = "\033[91mError\033[0m" + "Unknown Message: " + text
else:
metaval = self.__regmeta.match(text)
if not(severity == "Warning" and metaval is not None):
self.__report += severity + " " + text + "\n"
if severity != "Note" and severity != "Warning":
#self.__error_cnt += 1
                        #TODO: Report this separately
None
if compare:
for f in self.__px_boxes:
if self.__px_boxes[f] != {}:
for b in self.__px_boxes[f]:
((start_x, start_y), (end_x, end_y)) = self.__px_boxes[f][b]
self.__report += "\033[91mError\033[0m" + " File: '" + f
self.__report += "' VHDL missing box ((" + str(start_x) + ", "
self.__report += str(start_y) + "), (" + str(end_x) + ", " + str(end_y) + "))\n"
self.__error_cnt += 1
if not vsim.done:
self.__report = self.__report + stim_file + ": Output of data reached timeout in simulation of 2-pass with boundbox calculation. Simulation abort\n"
self.__error_cnt += 1
del vsim
class Exec_parser(threading.Thread):
## Executes a binary file and parses the output
#
# You can use the event.wait to wait for new messages or the done signal
# The boolean done gives you the information if the simulation is done
# @param cmd command to execute
# @param cwd working directory
# @param regex used to parse the output of each line of stdout and
# use the result as parameter to run the eval_line
def __init__(self, cmd, cwd=".", regex = None):
super(Exec_parser, self).__init__()
self.__cmd = cmd;
self.__cwd = cwd
self.event = threading.Event()
self.__sema = threading.Semaphore()
self.__messages = []
self.done = False
self.__stop = False
# store parsed messages
# overwrite this values
self.__regline = re.compile(regex)
print(("Exec: " + str(cmd)))
print(("CWD: " + self.__cwd))
self.__proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=self.__cwd)
def add_message(self, m):
with self.__sema:
self.__messages.append(m)
self.event.set()
## Get all Messages stored in the message queue
def getMessages(self):
with self.__sema:
ret_msg = self.__messages
self.__messages = []
self.event.clear()
return ret_msg
def __del__(self):
self.__stop = True
os.kill(self.__proc.pid, signal.SIGKILL)
## This methode has to evaluate the result of the regex for each line
# you need to overwrite this methode
def eval_line(self, res):
None
def run(self):
line = ' ';
while not self.__stop and line != '':
#apply precompile regex pattern
line = self.__proc.stdout.readline().decode()
res = self.__regline.match(line)
if res is not None:
self.eval_line(res)
# notify the event if done
with self.__sema:
self.event.set()
self.done = True
class VSIM_parser(Exec_parser):
vsim="vsim"
## Executes Modelsim and parses the output
#
# You can use the event.wait to wait for new messages or the done signal
# The boolean done gives you the information if the simulation is done
# @param hdl_entity entity wich should be executed
# @param cwd working directory this has to be the directory where the vlib is stored
def __init__(self, hdl_entity, cwd=".", resolution="ns"):
super(VSIM_parser, self).__init__([self.vsim, "-c", "-do", "run -all;quit", "-t", resolution, hdl_entity], cwd, r"# Time: ([0-9]+ [fpnum]s).*|# \*\* (Note|Warning|Error|Failure): (.*)")
self.__msg = []
## This methode has to evaluate the result of the regex for each line
def eval_line(self, res):
if res.group(1) is not None:
# this is the output of a time info
for m in self.__msg:
(severity, text) = m
self.add_message((res.group(1), severity, text))
self.__msg = []
else:
if res.group(2) is not None:
severity = res.group(2)
if res.group(3) is not None:
self.__msg.append((severity, res.group(3)))
class C_parser(Exec_parser):
## Executes Cpp Code and parses the output
#
# You can use the event.wait to wait for new messages or the done signal
# The boolean done gives you the information if the simulation is done
# @param cwd working directory this has to be the directory where the vlib is stored
def __init__(self, cwd=ccl_c_dir):
super(C_parser, self).__init__(["./ccl"], cwd, r"Processing file '([^']+)' and .*|Completed object:\[([0-9]+), ([0-9]+)\]x\[([0-9]+), ([0-9]+)\].*")
self.__file=""
## This methode has to evaluate the result of the regex for each line
def eval_line(self, res):
if res.group(1) is not None:
# filename of analyzed file
self.__file = res.group(1)
else:
if res.group(2) is not None and res.group(3) is not None and res.group(4) is not None and res.group(5) is not None :
self.add_message((self.__file, int(res.group(2)), int(res.group(3)), int(res.group(4)), int(res.group(5))))
if __name__== "__main__":
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-s", "--sim-file", dest="sim_file",
help="filename for which the simulation should run and the result be visualized")
parser.add_option("-p", "--pass-one", dest="passone", action="store_true",
help="only the first pass will be analyzed otherwise the lables after the second pass and the boundboxes will be analyzed")
parser.add_option("-u", "--uart-tb", dest="uart_tb", action="store_true",
help="Simulates the uart_tb and compare the output with the python implementation")
parser.add_option("-n", "--continuous", dest="continuous", action="store_true",
help="Sends all Pictures in one continuous stream to the DUT")
parser.add_option("-t", "--timeout", dest="timeout", type="float", default=120.0,
help="seconds (as float) how long the time between two outputs before abort the simulation")
parser.add_option("-c", dest="c", action="store_true",
help="Checks the Python boundbox calculation against the cpp")
parser.add_option("-v", "--vhdl-dut", dest="v", action="store_true",
help="Checks the Python boundbox calculation against the vhdl DUT")
parser.add_option("--no-lables", dest="nl", action="store_true",
help="Don't check lables")
parser.add_option("-x", "--max-x", dest="max_x", type="int", default=32,
help="Max width of image send to ccl")
parser.add_option("-y", "--max-y", dest="max_y", type="int", default=32,
help="Max height of image send to ccl")
parser.add_option("-d", "--input-dir", dest="indir" , default="../img/",
help="Input dir used to check all Files")
parser.add_option("-e", "--file-extension", dest="fext", default="pbm",
help="File extension for the input dir run (default \"pbm\")")
(option, args) = parser.parse_args()
fext = option.fext
indir = option.indir
box_only = False
hdl_file = "tb_labeling"
resolution = "ns"
if option.uart_tb:
hdl_file = "tb_com_uart"
resolution = "ps"
box_only = True
if option.passone:
hdl_file = "tb_labeling_p1"
wdir="vhdl/"
if option.v:
wdir="vhdl/ccl_dut/"
box_only = True
if option.nl:
box_only = True
if (not option.c) and option.sim_file:
if option.passone:
comp_first=CompareF(option.sim_file, option.passone, option.timeout, wdir,
hdl_file, box_only, resolution, option.max_x,
option.max_y, False)
comp_first.get_pycola().plot_fp_add('First Pass HDL',
comp_first.get_hdl_lable())
else:
comp_first=CompareF(option.sim_file, False, option.timeout, wdir,
hdl_file, box_only, resolution, option.max_x,
option.max_y, False)
errors = comp_first.get_error_cnt()
print(str(errors) + " errors reported")
print("error report: \n" + comp_first.get_report())
if box_only:
boxes = comp_first.get_hdl_boxes()
if len(boxes) == 1:
for k in boxes:
comp_first.get_pycola().plot_sp_add('Boxes HDL', None, boxes[k])
elif len(boxes) == 0:
comp_first.get_pycola().plot_sp_add('Boxes HDL', None, None)
else:
print ("more outputs received than expected")
print((str(boxes)))
else:
boxes = comp_first.get_hdl_boxes()
if len(boxes) <= 1:
for k in boxes:
comp_first.get_pycola().plot_sp_add('Second Pass HDL',
comp_first.get_hdl_lable(), boxes[k])
elif len(boxes) == 0:
comp_first.get_pycola().plot_sp_add('Second Pass HDL',
comp_first.get_hdl_lable(), None)
else:
print ("more outputs received than expected")
print((str(boxes)))
else:
        # run verification of all available stimuli files and generate a report
# count errors
errors=0
chkdfiles=""
err_by_file={}
report=""
if option.c:
cmp_cp = CompareCP(option.timeout, option.max_x, option.max_y, option.sim_file)
errors = cmp_cp.get_error_cnt()
print((cmp_cp.get_report()))
elif option.continuous:
cnt = 0
filenames = ""
for files in glob.glob(indir + "/*." + option.fext):
if files != indir + "/sim_in." + fext and files != indir + "\\sim_in."+fext:
filenames += "../" + files + "\n"
cnt += 1
f = open("../img/continuous.files", 'w')
f.write(str(cnt) + "\n")
f.write(str(option.max_x) + "\n")
f.write(str(option.max_y) + "\n")
f.write(filenames)
f.close()
hdl_file="tb_labeling_cont"
comp_first=CompareF(files, option.passone, option.timeout, wdir, hdl_file,
box_only, resolution, option.max_x, option.max_y, True)
errors = errors + comp_first.get_error_cnt()
print((comp_first.get_report()))
else:
#run vhdl simulation for each input file
for files in glob.glob(indir + "/*."+fext):
if files != indir + "/sim_in." +fext and files != indir + "\\sim_in." +fext:
print(("\n\nStart verification with input of " + files+"\n"))
chkdfiles = chkdfiles + files +'\n'
comp_first=CompareF(files, option.passone, option.timeout, wdir,
hdl_file, box_only, resolution, option.max_x, option.max_y, False)
errors = errors + comp_first.get_error_cnt()
err_by_file[files] = comp_first.get_error_cnt()
print((comp_first.get_report()))
print("Verification with the following files:")
for filename in err_by_file:
if err_by_file[filename] == 0:
print(("\033[92m" + filename + "\033[0m"))
else:
print(("\033[91m" + filename + " errors: " + str(err_by_file[filename]) + "\033[0m"))
if errors == 0:
print("\033[92mVerification successful\033[0m")
else:
print(report)
print(("\033[91mVerification failed\033[0m with " + str(errors) + " errors"))
if wdir == "vhdl/ccl_dut/":
print(("The verification is only valid if you run ./mk_build.sh in "+wdir))
print("Don't forget to run ./mk_synthesis.sh before a synthesis run")
| 1.695313 | 2 |
archive/2016/week3/homework/xor.py | YAtOff/python0 | 6 | 12788293 | <filename>archive/2016/week3/homework/xor.py
"""
Define the function `xor` (exclusive OR), which has the following truth
table:
+----------------------+
|Input          |Output|
+-------+-------+------|
|A      |B      |      |
+-------+-------+------|
|False  |False  |False |
+-------+-------+------|
|False  |True   |True  |
+-------+-------+------|
|True   |False  |True  |
+-------+-------+------|
|True   |True   |False |
+-------+-------+------+
>>> xor(True, True)
False
>>> xor(True, False)
True
"""
def xor(a, b):
    return a != b
| 3.828125 | 4 |
testing/vcsaddons/test_vcs_addons_pcoords.py | xylar/cdat | 62 | 12788294 |
import sys,os
data = sys.argv[1]
src = sys.argv[2]
import vcs.testing.regression as regression
import vcs
import vcsaddons, numpy
import cdms2
f = cdms2.open(data)
rms_xyt = f("rms_xyt")
ax1 = cdms2.createAxis(['0071-0100' ,'ACCESS1-0' ,'ACCESS1-3' ,'CCSM4' ,'CESM1-BGC' ,'CESM1-CAM5',
'CESM1-FASTCHEM' ,'CESM1-WACCM' ,'CSIRO-Mk3-6-0' ,'FGOALS-g2' ,'GFDL-CM3',
'GFDL-ESM2G' ,'GFDL-ESM2M' ,'HadGEM2-AO' ,'MIROC4h' ,'bcc-csm1-1',
'bcc-csm1-1-m'],id="models")
ax2 = cdms2.createAxis(['pr', 'prw', 'psl', 'rltcre', 'rlut', 'rstcre', 'ta-200', 'ta-850', 'tas', 'ua-200',
'ua-850', 'va-200', 'va-850', 'zg-500'],id="statistic")
rms_xyt.setAxisList([ax2,ax1])
x = regression.init(geometry=(1200,600))
import vcsaddons
bg = False
gm = vcsaddons.createparallelcoordinates(x=x)
t = vcs.createtemplate()
to=x.createtextorientation()
to.angle=-45
to.halign="right"
t.xlabel1.textorientation = to.name
t.reset('x',0.05,0.9,t.data.x1,t.data.x2)
#t.reset('y',0.5,0.9,t.data.y1,t.data.y2)
ln = vcs.createline()
ln.color = [[0,0,0,0]]
t.legend.line = ln
t.box1.priority=0
t.legend.x1 = .91
t.legend.x2 = .99
t.legend.y1 = t.data.y1
t.legend.y2 = t.data.y2
# Set variable name
rms_xyt.id = "RMS"
# Set units of each variables on axis
rms_xyt.getAxis(-2).units = ["mm/day","mm/day","hPa","W/m2","W/m2","W/m2", "K","K","K","m/s","m/s","m/s","m/s","m"]
# Sets title
rms_xyt.title = "Annual Mean Error"
gm.plot(rms_xyt,template=t,bg=bg)
print src
fnm = os.path.join(os.getcwd(), "testParallelCoordinates.png")
x.png(fnm)
ret = vcs.testing.regression.check_result_image(
fnm,
src)
sys.exit(ret)
| 2.03125 | 2 |
SST_doppler.py | CyclingNinja/SST_doppler_calc | 0 | 12788295 | <filename>SST_doppler.py
from __future__ import print_function, division
from sunkitsst.read_cubes import read_cubes
import matplotlib.pyplot as plt
import numpy as np
from astropy.modeling import models, fitting
from scipy.optimize import minimize
#from scipy.signal import argrelextrema
from scipy.special import gammaln
from skimage import img_as_float
from astropy.constants import c
# need to import the _fitter_to_model_params helper function
from astropy.modeling.fitting import _fitter_to_model_params
imfile = '/data_swat/arlimb/crispex.6563.icube'
spfile = '/data_swat/arlimb/crispex.6563.sp.icube'
wave_ind = np.loadtxt('spect_ind.txt')
imheader, icube, spheader, spcube = read_cubes(imfile,spfile)
SST_cad = 2.5
SST_pix = 0.059
l_core = 6562.8
class PoissonlikeDistr(object):
# Daniela: you actually want to input an astropy model here
# not a function
def __init__(self, x, y, model):
self.x = x
self.y = y
self.model = model
return
# Daniela: evaluate needs to take a list of parameters as input
def evaluate(self, params):
# Daniela: set the input parameters in the astropy model using the
# list of parameters in params
_fitter_to_model_params(self.model, params)
# Daniela: make the mean model
        mean_model = self.model(self.x)
# Daniela: not sure what your 'x' in this log-likelihood is, but it should be
# the mean model
loglike = np.sum(-mean_model + self.y*np.log(mean_model) - gammaln(self.y + 1))
return loglike
# Daniela: __call__ needs to take self and params as input
def __call__(self, params):
# Daniela: __call__ should return the log-likelihood
return self.evaluate(params)
small_cube = np.array(icube[100:,:, 600:,450:570])
small_cube = img_as_float(small_cube)
dop_arr = np.zeros(small_cube[0, 0, :, :].shape)
param_arr = np.zeros(small_cube[:, 0, :, :].shape)
plt.ioff()
for T in range(small_cube.shape[0]):
# define the box to do it in
for xi in range(small_cube[0].shape[1]):
for yi in range(small_cube[0].shape[2]):
# flip the y axis data points
y = small_cube[T,:, xi, yi]
ysg = y[:]
ysg -= np.min(y)
x = wave_ind
# SINGLE GAUSSIAN FITTING
            # this definitely works (ish)
ysg = ysg*-1 + np.max(y)
# Daniela: is there a reason why there are round brackets around the Gaussian model?
gaus_sing = models.Gaussian1D(amplitude=np.max(ysg), mean=x[19], stddev=np.std(ysg))
# Daniela: instantiate the log-likelihood object;
# please check whether it uses the right arrays for the data
loglike_sing = PoissonlikeDistr(x, ysg, gaus_sing)
# initial parameters
init_params_s = [np.max(ysg), x[19], np.std(ysg)]
# Daniela: for maximum likelihood fitting, we need to define the *negative*
# log-likelihood:
neg_loglike_sing = lambda x: -loglike_sing(x)
# Daniela: here's the optimization:
opt_sing = minimize(neg_loglike_sing, init_params_s,
method="L-BFGS-B", tol=1.e-10)
# Daniela: print the negative log-likelihood:
#print("The value of the negative log-likelihood: " + str(opt_sing.fun))
# Daniela: the parameters at the maximum of the likelihood is in opt.x:
fit_pars = opt_sing.x
# Daniela : now we can put the parameters back into the Gaussian model
_fitter_to_model_params(gaus_sing, fit_pars)
# Bayesian information criterion
# see also: https://en.wikipedia.org/wiki/Bayesian_information_criterion
# bic = -2*loglike + n_params * log(n_datapoints)
# note to myself: opt.fun is -loglike, so we'll just use that here
bic_sing = 2.*opt_sing.fun + fit_pars.shape[0]*np.log(x.shape[0])
# Daniela: from here on, you can do the same for the model with two Gaussians
# Then you can compare the two BICs for a slightly hacky way of model
# comparison
# DOUBLE GAUSSIAN FITTING
ydg = y[:]
Imax = np.max(ydg)
gaus_double = (models.Gaussian1D(amplitude=Imax, mean=x[12], stddev=0.2) +
models.Gaussian1D(amplitude=Imax, mean=x[24], stddev=0.2))
init_params_double = [np.max(ydg), x[12], np.std(ydg),
np.max(ydg), x[24], np.std(ydg)]
loglike_double = PoissonlikeDistr(x, ysg, gaus_double)
neg_loglike_doub = lambda x: -loglike_double(x)
opt_doub = minimize(neg_loglike_doub, init_params_double,
method="L-BFGS-B", tol=1.e-10)
loglike_doub = PoissonlikeDistr(x, ydg, gaus_double)
fit_pars_dg = opt_doub.x
_fitter_to_model_params(gaus_double, fit_pars_dg)
            bic_doub = 2.*opt_doub.fun + fit_pars_dg.shape[0]*np.log(x.shape[0])
# use the bic values to assign to fit again and calc the doppler array
if bic_doub < bic_sing:
fit_sing_g_2 = fitting.LevMarLSQFitter()
gs2 = fit_sing_g_2(gaus_sing, x, ysg)
gsg = lambda x: -1 * gs2(x)
ysg = ysg*-1
t_mean = gs2.mean.value
else:
fit_doub_g_2 = fitting.LevMarLSQFitter()
ydg = y[:]
gd2 = fit_doub_g_2(gaus_double, x, ydg)
res = minimize(gd2, [6562.8],
method='L-BFGS-B',
bounds=[[x[19 - 5], x[19 + 5]],])
t_mean = res.x
dop_arr[xi,yi] = t_mean
np.save('/storage2/jet/dop_arrs/dop_arr_{:03d}.npy'.format(T), dop_arr)
print('/storage2/jet/dop_arrs/dop_arr_{:03d}.npy')
# # revert to an interpolation to find the minima
# # need to keep the regualar orientation of the y dist
# if fit_g2.fit_info['param_cov'] is None:
#
# ydg = y[:]
# Imax = np.max(ydg)
#
# g_init = (models.Gaussian1D(amplitude=Imax, mean=x[12], stddev=0.2) +
# models.Gaussian1D(amplitude=Imax, mean=x[24], stddev=0.2))
# fit_gdg = fitting.LevMarLSQFitter()
# gdg = fit_gdg(g_init, x, ydg)
#
# res = minimize(gdg, [6562.8], method='L-BFGS-B', bounds=[[x[19 - 5], x[19 + 5]],])
# t_mean = res.x
# if ((t_mean[0] - l_core) > 1) | ((t_mean[0] - l_core) < -1):
# t_mean = l_core
# dop_arr[T,xi,yi] = t_mean
#np.save('/storage2/jet/SST/dopplergram.npy', dop_arr)
## Plot the data with the best-fit model
#plt.figure(figsize=(8,5))
#plt.plot(x, y, 'ko')
#plt.plot(x, g(x), label='Gaussian')
#plt.xlabel('Position')
#plt.ylabel('Flux')
#
#plt.plot(res.x, g(res.x), 'ro')
#plt.show()
| 2.375 | 2 |
pyrobolearn/control/pid.py | Pandinosaurus/pyrobolearn | 2 | 12788296 | <filename>pyrobolearn/control/pid.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This defines the PID controller
class PID(object):
r"""Proportional-Integral-Derivative Controller
The PID control scheme is given by:
.. math: u(t) = K_p e(t) + K_i \int_0^t e(\tau) d\tau + K_d \frac{d e(t)}{dt}
where :math:`u(t)` is the controller output, :math:`K_p, K_i, K_d` are respectively the proportional, integral,
and derivative tuning gains (set by the user or an algorithm), :math:`e(t) = (x_{des} - x(t))` is the error
between the desired point :math:`x_{des}` and the current point :math:`x(t)`.
"""
def __init__(self, kp=0, kd=0, ki=0, dt=0.001):
"""
Initialize the PID controller
Args:
kp (float): proportional gain
kd (float): derivative gain
ki (float): integral gain
dt (float): time step
"""
self.kp = kp
self.kd = kd
self.ki = ki
self.dt = dt
self.errorI = 0
self.prev_error = 0
def compute(self, xd, x, dt=None):
"""
Compute the controller output using PID control scheme.
Args:
xd (float, array): desired point
x (float, array): current point
dt (float): time step
Returns:
float, array: control output
"""
        if dt is None:  # fall back to the controller's default time step
            dt = self.dt
        error = xd - x
self.errorI += error
errorD = (error - self.prev_error) / dt
self.prev_error = error
u = self.kp * error + self.ki * self.errorI + self.kd * errorD
return u | 3.25 | 3 |
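# A minimal usage sketch for the PID class above (added for illustration, not
# part of the original pyrobolearn source). The gains and the toy first-order
# plant are illustrative assumptions, not tuned values.
if __name__ == "__main__":
    pid = PID(kp=1.2, ki=0.05, kd=0.3, dt=0.01)
    x, xd = 0.0, 1.0                       # current state and desired setpoint
    for _ in range(5):
        u = pid.compute(xd, x, dt=0.01)    # control output for this step
        x += 0.01 * u                      # toy plant: dx/dt = u
        print(round(x, 4))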
tests/test_class6.py | kinther/ansible_course | 14 | 12788297 | import re
import pytest
from pathlib import Path
from utilities import subprocess_runner, remove_ansible_warnings
TEST_CASES = [
"../class6/collateral/roles_test/test_pb1.yml",
"../class6/collateral/roles_test/test_pb2.yml",
"../class6/collateral/roles_test/test_pb3.yml",
"../class6/collateral/roles_test/test_pb4.yml",
"../class6/collateral/roles_test/test_pb5.yml",
"../class6/collateral/tasks/include_import_tags.yml",
"../class6/collateral/tasks/include_import_when.yml",
"../class6/collateral/tasks/include_tasks_loop.yml",
"../class6/collateral/tasks/standalone_pb.yml",
"../class6/collateral/tasks/standalone_pb2.yml",
"../class6/collateral/tasks/standalone_pb3.yml",
# Expected to fail
# "../class6/collateral/tasks/standalone_pb4.yml",
"../class6/collateral/vars/test_vars1.yml",
"../class6/collateral/vars/test_vars2.yml",
"../class6/collateral/vars/test_vars3.yml",
]
@pytest.mark.parametrize("test_case", TEST_CASES)
def test_runner_collateral(test_case):
path_obj = Path(test_case)
script = path_obj.name
script_dir = path_obj.parents[0]
cmd_list = ["ansible-playbook", script]
std_out, std_err, return_code = subprocess_runner(cmd_list, script_dir)
std_err = remove_ansible_warnings(std_err)
assert return_code == 0
assert std_err == ""
@pytest.mark.parametrize("exercise", ["exercise1a.yml", "exercise1b.yml"])
def test_class6_ex1a_1b(exercise):
base_path = "../class6/exercises/exercise1"
cmd_list = ["ansible-playbook", exercise]
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
assert "localhost : ok=9" in std_out
@pytest.mark.parametrize(
"tags,result",
[(None, "ok=13"), ("foo1", "ok=5"), ("foo2", "ok=5"), ("foo3", "ok=5")],
)
def test_class6_ex1c(tags, result):
base_path = "../class6/exercises/exercise1"
if tags:
cmd_list = ["ansible-playbook", "exercise1c.yml", "--tags", tags]
else:
cmd_list = ["ansible-playbook", "exercise1c.yml"]
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
assert result in std_out
@pytest.mark.parametrize("exercise", ["exercise2a.yml", "exercise2b.yml"])
def test_class6_ex2a_2b(exercise):
base_path = "../class6/exercises/exercise2"
cmd_list = ["ansible-playbook", exercise]
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
assert "localhost : ok=2" in std_out
@pytest.mark.parametrize(
"tags,result",
[(None, "ok=4"), ("foo1", "ok=2"), ("foo2", "ok=2"), ("foo3", "ok=2")],
)
def test_class6_ex2c(tags, result):
base_path = "../class6/exercises/exercise2"
if tags:
cmd_list = ["ansible-playbook", "exercise2c.yml", "--tags", tags]
else:
cmd_list = ["ansible-playbook", "exercise2c.yml"]
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
assert result in std_out
def test_class6_ex3():
"""Should be idempotent on the second execution."""
base_path = "../class6/exercises/exercise3"
cmd_list = ["ansible-playbook", "exercise3.yml"]
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
# Execute script again
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
assert re.search(r"^cisco1\s+:\s+ok=2.*skipped=1.*$", std_out, flags=re.M)
assert re.search(r"^cisco2\s+:\s+ok=2.*skipped=1.*$", std_out, flags=re.M)
assert re.search(r"^cisco5\s+:\s+ok=2.*skipped=1.*$", std_out, flags=re.M)
assert re.search(r"^cisco6\s+:\s+ok=2.*skipped=1.*$", std_out, flags=re.M)
def test_class6_ex4():
"""Should be idempotent on the second execution."""
base_path = "../class6/exercises/exercise4"
cmd_list = ["ansible-playbook", "exercise4.yml", "-f 12"]
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
# Execute script again
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
assert re.search(r"^cisco1\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
assert re.search(r"^cisco2\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
assert re.search(r"^cisco5\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
assert re.search(r"^cisco6\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
assert re.search(r"^arista5\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
assert re.search(r"^arista6\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
assert re.search(r"^arista7\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
assert re.search(r"^arista8\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
assert re.search(r"^nxos1\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
assert re.search(r"^nxos2\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
def test_class6_ex5():
"""Should be idempotent on the second execution."""
base_path = "../class6/exercises/exercise5"
cmd_list = ["ansible-playbook", "exercise5.yml", "-f 12"]
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
# Execute script again
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
assert re.search(r"^cisco1\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
assert re.search(r"^cisco2\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
assert re.search(r"^cisco5\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
assert re.search(r"^cisco6\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
assert re.search(r"^arista5\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
assert re.search(r"^arista6\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
assert re.search(r"^arista7\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
assert re.search(r"^arista8\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
assert re.search(r"^nxos1\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
assert re.search(r"^nxos2\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
def test_class6_ex6():
base_path = "../class6/exercises/exercise6"
cmd_list = ["ansible-playbook", "exercise6.yml"]
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
assert re.search(r"^cisco5\s+:\s+ok=1 ", std_out, flags=re.M)
assert re.search(r"^cisco6\s+:\s+ok=1 ", std_out, flags=re.M)
| 2.109375 | 2 |
pyop4/opbase/__init__.py | cindy0123/duty-util01 | 0 | 12788298 | #
from opmsg import opmsg
from opfilehandle import iterfind, is_writeable, is_readable, tailf, filetail, myreadline, md5sum
from pypyscreen import getoutput, getstderr
from openv import op4env
from opErrCtl import opErrCtl
from opgit import basegit
from opfileformat import opfileformat
from opconfig import opconfig
from opstage import opstage
from opflowcontrol import opflowcontrol
from opxls import opxls
| 1.390625 | 1 |
myshop/cart/cart.py | Devenc234/balajiemitra | 0 | 12788299 | from decimal import Decimal
from django.conf import settings
from shop.models import Product
from coupons.models import Coupon
class Cart(object):
def __init__(self, request):
"""
Initialize the cart
:param request:
"""
self.session = request.session
cart = self.session.get(settings.CART_SESSION_ID)
if not cart:
# save an empty cart in the session
cart = self.session[settings.CART_SESSION_ID] = {}
self.cart = cart
# store current applied coupon
self.coupon_id = self.session.get('coupon_id')
def add(self, product, quantity=1, update_quantity=False):
"""
Add a product to the cart or update quantity of cart
:param product: product which needs to be added
:param quantity: by default 1
        :param update_quantity: by default False
:return:
"""
# We use product_id to remember what has been added to cart till now.
# We have converted product_id to string because django uses json to serialize session data
# and json allow only string string for keys. for value part, we can put integers but not decimal
product_id = str(product.id)
if product_id not in self.cart:
self.cart[product_id] = {'quantity': 0,
'price': str(product.price)}
if update_quantity:
self.cart[product_id]['quantity'] = quantity
else:
self.cart[product_id]['quantity'] += quantity
self.save()
def save(self):
# update the session cart
self.session[settings.CART_SESSION_ID] = self.cart
# mark the session as "modified" to make sure it is saved
self.session.modified = True
def remove(self, product):
"""
Remove a product from the cart
        :param product: product object which needs to be removed
:return:
"""
product_id = str(product.id)
if product_id in self.cart:
del self.cart[product_id]
self.save()
def __iter__(self):
"""
        Iterate over the product_ids in the cart and get the products from the backend
:return:
"""
product_ids = self.cart.keys()
# get the product objects and add them to the cart
products = Product.objects.filter(id__in=product_ids)
for product in products:
self.cart[str(product.id)]['product'] = product
for item in self.cart.values():
item['price'] = Decimal(item['price'])
item['total_price'] = item['price']*item['quantity']
yield item
def __len__(self):
"""
count all the items in the cart
:return:
"""
return sum(item['quantity'] for item in self.cart.values())
# TODO: Need to change UI for get_total_price or get_total_price_after_discount after adding discount
# currently for cart detail and invoice, after applying discount, old total cost printed
def get_total_price(self):
# Because we are not using iter method here, so price comes as string. we have to convert price to decimal
return sum(item['quantity']*Decimal(item['price']) for item in self.cart.values())
def clear(self):
"""
To empty the cart
:return:
"""
self.session[settings.CART_SESSION_ID] = {}
self.session.modified = True
@property
def coupon(self):
if self.coupon_id:
return Coupon.objects.get(id=self.coupon_id)
return None
def get_discount(self):
if self.coupon:
return (self.coupon.discount / Decimal('100')) \
* self.get_total_price()
return Decimal('0')
def get_total_price_after_discount(self):
return self.get_total_price() - self.get_discount()
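

def _example_cart_usage(request, product):
    """
    Illustrative sketch only (not part of the original shop code): shows how a
    view would typically use Cart. Assumes `request` is a normal Django request
    with a session and `product` is a shop.models.Product instance.
    """
    cart = Cart(request)
    cart.add(product, quantity=2)                         # add two units
    cart.add(product, quantity=5, update_quantity=True)   # overwrite quantity
    total_items = len(cart)                               # __len__ sums quantities
    total_price = cart.get_total_price_after_discount()
    cart.remove(product)                                  # drop the product again
    return total_items, total_price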
| 2.640625 | 3 |
huxley/core/migrations/0043_auto_20200820_1401.py | bmun/huxley | 18 | 12788300 | # Generated by Django 2.2.6 on 2020-08-20 14:01
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('core', '0042_conference_treasurer'),
]
operations = [
migrations.AddField(
model_name='conference',
name='waiver_avail_date',
field=models.DateField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='conference',
name='waiver_deadline',
field=models.DateField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='conference',
name='waiver_link',
field=models.CharField(default='www.bmun.org', max_length=300),
preserve_default=False,
),
]
| 1.640625 | 2 |
nodenet/python/nodenet/variables/nodenet.py | NOOXY-research/NodeNet | 2 | 12788301 | <filename>nodenet/python/nodenet/variables/nodenet.py
# nodenet/variables/nodenet.py
# Description:
# "nodenet.py" provideinformation of current version of nodenet.
# Copyright 2018 NOOXY. All Rights Reserved.
nodenet = {
'Version' : 'NodeNetPy 0.0.0',
'Author' : [
'<NAME>',
],
'Company' : 'NOOXY inc.',
'Copyright' : 'Copyright(c)2017-2019 NOOXY inc. Taiwan.',
'Website' : 'http://nooxy.org',
}
| 1.296875 | 1 |
hypha/apply/review/options.py | killapop/hypha | 20 | 12788302 | from django.utils.translation import gettext as _
NA = 99
RATE_CHOICES = (
(0, _('0. Need more info')),
(1, _('1. Poor')),
(2, _('2. Not so good')),
(3, _('3. Is o.k.')),
(4, _('4. Good')),
(5, _('5. Excellent')),
(NA, _('n/a - choose not to answer')),
)
RATE_CHOICES_DICT = dict(RATE_CHOICES)
RATE_CHOICE_NA = RATE_CHOICES_DICT[NA]
NO = 0
MAYBE = 1
YES = 2
RECOMMENDATION_CHOICES = (
(NO, _('No')),
(MAYBE, _('Maybe')),
(YES, _('Yes')),
)
DISAGREE = 0
AGREE = 1
OPINION_CHOICES = (
(AGREE, _('Agree')),
(DISAGREE, _('Disagree')),
)
PRIVATE = 'private'
REVIEWER = 'reviewers'
VISIBILILTY_HELP_TEXT = {
PRIVATE: _('Visible only to staff.'),
REVIEWER: _('Visible to other reviewers and staff.'),
}
VISIBILITY = {
PRIVATE: _('Private'),
REVIEWER: _('Reviewers and Staff'),
}
| 2.171875 | 2 |
src/clustering.py | krishnaShreedhar/Compositional-embedding-for-speaker-diarization | 0 | 12788303 | """
Simple audio clustering
1. Get the embeddings - at an interval of 0.5s each
2. Get the VAD - variable interval
3. Get embeddings for a VAD interval -> Take average of the embeddings
4. Get the ground truth for embedding for each speaker - marked 0.5s interval
5. L2 Normalize the embeddings before taking a distance measure
6. Clustering - Speaker Verification Task
1. Fix the ground truth embedding as the centroid for each speaker
2. Cluster all the points to the closest centroid
3. Verify the output
"""
import os
import argparse
import json
import yaml
import pickle
import numpy as np
import pandas as pd
import utils
import isat_diarization as isat_d
import constants
def dist_emb(emb_1, emb_2, dist_type="euclid"):
"""
Distance between two embeddings
"""
dist = None
if dist_type == "euclid":
# Euclidean distance
dist = np.linalg.norm(emb_1 - emb_2)
elif dist_type == "cosine":
        # Cosine distance (1 - cosine similarity), so smaller always means closer
        dist = 1.0 - np.dot(emb_1, emb_2) / (np.linalg.norm(emb_1) * np.linalg.norm(emb_2))
return dist
def cluster_gt(embeddings, vad, dict_gt_speakers):
dict_clusters = {
val: {
"embedding_id": key,
"embedding_val": embeddings[key],
} for key, val in dict_gt_speakers.items()
}
list_emb = [(dict_gt_speakers[key], embeddings[key]) for key, val in dict_gt_speakers.items()]
labels = []
for emb_index, emb_actual in enumerate(embeddings):
min_dist = np.inf
label = "NoSpeaker"
for speaker, emb_ref in list_emb:
dist = dist_emb(emb_ref, emb_actual)
if min_dist > dist:
min_dist = dist
label = speaker
labels.append(label)
return labels
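

def _demo_cluster_gt():
    """
    Tiny self-contained sketch (added for illustration, not part of the original
    pipeline): clusters four toy 2-D "embeddings" against two ground-truth
    speakers, mirroring step 6 of the module docstring. The vectors and speaker
    names are made-up values.
    """
    embeddings = [np.array([0.0, 1.0]), np.array([0.1, 0.9]),
                  np.array([1.0, 0.0]), np.array([0.9, 0.2])]
    dict_gt_speakers = {0: "spk_A", 2: "spk_B"}   # embedding index -> speaker
    labels = cluster_gt(embeddings, vad=None, dict_gt_speakers=dict_gt_speakers)
    # Expected: ['spk_A', 'spk_A', 'spk_B', 'spk_B']
    return labels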
def normalize_embeddings(embeddings):
"""
https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html
"""
l2_norm = np.linalg.norm(embeddings, ord=2)
return embeddings
def get_embeddings(audio_path, dir_target, src="gen"):
"""
:param src: "gen" for generate, "file" for read from file
"""
embeddings = None
if src == "gen":
print(f"Generating embeddings")
embeddings = isat_d.gen_embeddings(audio_path, dir_target)
elif src == "file":
embeddings_path = os.path.join(dir_target, "embeddings.pkl")
with open(embeddings_path, "rb") as fh:
embeddings = pickle.load(fh)
print(f"Loaded embeddings from: {embeddings_path}")
print(f"embeddings: type: {type(embeddings)}")
embeddings_data = embeddings.data
return embeddings_data
def get_vad(vad_path):
with open(vad_path, "rb") as fh:
vad = json.load(fh)
print(f"Loaded vad from: {vad_path}")
print(f"vad: type: {type(vad)}")
return vad
def get_gt_emb():
dict_gt = {
0: "A",
20: "B",
30: "C",
}
return dict_gt
def yml_dump():
import yaml
dict_gt = {
0: {
"audio_path": "x.wav",
"output_path": "../outputs",
"num_speakers": 2,
"ground_truths": [
{
"start": 2.1,
"end": 3.1,
"id": 123,
"name": "Krishna"
},
{
"start": 4.4,
"end": 7.1,
"id": 500,
"name": "Gauranga"
}
]
},
1: {
"audio_path": "y.wav",
"output_path": "../outputs",
"num_speakers": 2,
"ground_truths": [
{
"start": 2.1,
"end": 3.1,
"id": 123,
"name": "Krishna"
},
{
"start": 4.4,
"end": 7.1,
"id": 500,
"name": "Gauranga"
}
]
}
}
with open("../data/spkr_diarization_gt_temp.yml", "w") as fh:
yaml.dump(dict_gt, fh)
def round_off_embedding(start_time, float_embed_width=0.5):
"""Round a number to the closest half integer.
round_off_embedding(1.3)
1.5
round_off_embedding(2.6)
2.5
round_off_embedding(3.0)
3.0
round_off_embedding(4.1)
4.0
round_off_embedding(4.1, 0.25)
4.0
"""
reciprocal = int(1 / float_embed_width)
    # Note: the docstring examples above show the rounded *time*; the value
    # actually returned (and used downstream as embed_start_id) is the integer
    # window index, i.e. start_time / float_embed_width rounded to the nearest int.
    embed_id = round(start_time * reciprocal)
return embed_id
def get_embed_from_start_end(dict_all_gt):
dict_all_embed_gt = {}
for file_index, dict_gt in dict_all_gt.items():
dict_embed_gt = {
"ground_truths": [],
"audio_path": dict_gt["audio_path"],
"output_path": dict_gt["output_path"],
"num_speakers": dict_gt["num_speakers"]
}
list_ground_truths = []
for spkr_index, dict_spkr in enumerate(dict_gt["ground_truths"]):
start = dict_spkr["start"]
# end = dict_spkr["end"]
# id = dict_spkr["id"]
# name = dict_spkr["name"]
embed_start_id = round_off_embedding(start)
dict_gt = {
"embed_start_id": embed_start_id,
"id": dict_spkr["id"],
"name": dict_spkr["name"]
}
list_ground_truths.append(dict_gt)
dict_embed_gt["ground_truths"] = list_ground_truths
dict_all_embed_gt[file_index] = dict_embed_gt
return dict_all_embed_gt
def cluster_all(gt_yml_fp):
dict_all_embed_gt = read_ground_truths(gt_yml_fp)
status = "Done"
for file_index, dict_gt in dict_all_embed_gt.items():
list_ground_truths = dict_gt["ground_truths"]
audio_path = dict_gt["audio_path"]
output_path = dict_gt["output_path"]
dict_emb_gt = {dict_spkr["embed_start_id"]: dict_spkr["name"] for dict_spkr in list_ground_truths}
# for spkr_index, dict_spkr in enumerate(list_ground_truths):
# dict_emb_gt[dict_spkr["embed_start_id"]] = dict_spkr["name"]
if not os.path.exists(output_path):
os.makedirs(output_path)
run_clustering(audio_path, output_path, dict_emb_gt)
return status
def read_ground_truths(gt_yml_fp):
with open(gt_yml_fp, "r") as fh:
        dict_all_gt = yaml.safe_load(fh)
print(dict_all_gt)
dict_all_embed_gt = get_embed_from_start_end(dict_all_gt)
print(dict_all_embed_gt)
return dict_all_embed_gt
def run_clustering(audio_path, output_path, dict_gt):
embeddings = get_embeddings(audio_path, output_path)
# vad_path = os.path.join(output_path, "vad.json")
# vad = get_vad(vad_path)
vad = None
labels = cluster_gt(embeddings, vad, dict_gt)
print(utils.print_list(labels, "Clustered Embeddings"))
df = pd.DataFrame()
df["embed_index"] = [x for x in range(len(labels))]
df["labels"] = labels
out_path = os.path.join(output_path, "cluster_labels.csv")
df.to_csv(out_path, index=False)
return df
def run_yaml(args):
gt_yml_fp = args.get("gt_yml_fp", "../data/spkr_diarization_gt.yml")
cluster_all(gt_yml_fp)
def run(args):
audio_path = args.get("audio_path", "../no/audio")
output_path = args.get("output_path", "../outputs")
dict_gt = get_gt_emb()
run_clustering(audio_path, output_path, dict_gt)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--audio_path",
type=str,
help="audio filepath",
default="../data/panel_discussion_0045_15s.wav")
parser.add_argument("--output_path",
type=str,
help="output_path",
default="../outputs/panel_discussion_0045_15s_5")
parser.add_argument("--gt_yml_fp",
type=str,
help="ground truth yaml file path",
default="../data/spkr_diarization_gt.yml")
parser.add_argument("--config_path",
type=str,
help="config_path",
default="../configs/config_5.yml")
# parser.add_argument("-v", "--verbose", action="store_true",
# help="increase output verbosity")
args = parser.parse_args()
dict_args = vars(args)
dir_output = dict_args.get("output_path", "../outputs")
if not os.path.exists(dir_output):
os.makedirs(dir_output)
else:
print(f"ATTENTION: directory: [{dir_output}] already exists.")
return dict_args
def main():
args = parse_args()
run_yaml(args)
# yml_dump()
# print(round_off_embedding(4.1, 0.25))
# print(round_off_embedding(4.1, 0.35))
# print(round_off_embedding(4.1, 0.5))
# print(round_off_embedding(4.35, 0.25))
# print(round_off_embedding(4.35, 0.35))
# print(round_off_embedding(4.35, 0.5))
# read_ground_truths()
if __name__ == '__main__':
main()
| 3.25 | 3 |
day4/lambda.py | lilbond/bitis | 0 | 12788304 | <filename>day4/lambda.py<gh_stars>0
def greeting():
return "Hello"
print(greeting())
greet = lambda : "Hello"
print(greet())
# take arguments
strip_and_upper_case = lambda s: s.strip().upper()
strip_and_upper_case(" Hello ")
# take arbitrary number of arguments / keyword arguments
greeting = lambda x, *args, **kwargs: print(x, args, kwargs)
greeting('hello', 'world', world='world')
l = [1, -2, 3, -4, 5, 7]
l = list(filter(lambda x: x>0, l))
print(l)
my_list = [1, -2, 3, -4, 5, 7]
print(my_list)
l = list(map(lambda x: abs(x), my_list))
print(l)
| 3.5625 | 4 |
code/python/pymir/analytics/riffstation/mayor_minor_chords.py | mfranco/pymir | 1 | 12788305 | <reponame>mfranco/pymir<filename>code/python/pymir/analytics/riffstation/mayor_minor_chords.py<gh_stars>1-10
"""
How many majors and minor chords are in all songs
"""
from pymir import settings
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
def compute(db):
mayor_minor = []
for song in db:
row = {'mayor': 0, 'minor': 0}
for c in song['chord_vector_detected']:
if 'm' in c.lower():
row['minor'] += 1
else:
row['mayor'] += 1
mayor_minor.append(row)
df = pd.DataFrame(mayor_minor)
fname = (
os.path.join(
settings.IMG_DIR,
'initial_diagnose', 'major_vs_minor_chords_histogram.png'))
total = df.sum().values.tolist()
labels = df.columns.tolist()
ind = np.arange(len(labels)) # the x locations for the groups
fig, ax = plt.subplots()
width = 0.6
ax.bar(ind, total, width)
ax.grid(True)
plt.ylabel('Chords frequency')
    ax.set_xticks(ind + width)
    ax.set_xticklabels(labels)
plt.savefig(fname)
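# Expected input shape (illustrative): `db` is an iterable of song dicts carrying a
# 'chord_vector_detected' list, e.g. [{'chord_vector_detected': ['C', 'Am', 'G7']}].
# Chords containing an 'm' are counted as minor, everything else as major, and the
# histogram is written to <IMG_DIR>/initial_diagnose/major_vs_minor_chords_histogram.png.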
| 2.890625 | 3 |
app_test/ruuvi_app.py | jutako/ble-gateway | 2 | 12788306 | <gh_stars>1-10
from ruuvitag_sensor.ruuvi import RuuviTagSensor
# List of macs of sensors which data will be collected
# If list is empty, data will be collected for all found sensors
tag1 = 'C3:60:95:50:C6:0E'
def handle_data(found_data):
print('MAC ' + found_data[0])
print(found_data[1])
macs = [tag1]
RuuviTagSensor.get_datas(handle_data, macs)
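# Note: get_datas() keeps listening and invokes handle_data for every new
# advertisement from the listed tags until the process is interrupted
# (library behaviour; exact semantics may differ between ruuvitag_sensor versions).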
| 2.859375 | 3 |
easy_food/core/views.py | nahidsaikat/EasyFood-Web | 0 | 12788307 | <reponame>nahidsaikat/EasyFood-Web
"""Route declaration."""
from flask import (
Blueprint,
url_for,
render_template,
redirect,
make_response,
request,
session
)
from flask_login import current_user, login_required
from datetime import datetime as dt
from easy_food import db
from .forms import ContactForm
from .models import User
core_bp = Blueprint(
'core_bp', __name__,
template_folder='templates',
static_folder='static'
)
@core_bp.route('/contact', methods=('GET', 'POST'))
def contact():
form = ContactForm()
if form.validate_on_submit():
return redirect(url_for('success'))
return render_template(
'contact.html',
form=form
)
@core_bp.route('/success')
def success():
return make_response('This is success page', 200, {})
@core_bp.route('/')
# @login_required
def home():
"""Landing page."""
nav = [{'name': 'Home', 'url': 'https://example.com/1'},
{'name': 'About', 'url': 'https://example.com/2'},
{'name': 'Pics', 'url': 'https://example.com/3'}]
session['key'] = 'Show Users'
return render_template('home.html',
nav=nav,
title="Jinja Demo Site",
description="Smarter page templates \
with Flask & Jinja.")
@core_bp.route('/users', methods=['GET'])
def user_records():
"""Create a user via query string parameters."""
username = request.args.get('user')
email = request.args.get('email')
if username and email:
existing_user = User.query.filter(
User.username == username or User.email == email
).first()
if existing_user:
return make_response(
f'{username} ({email}) already created!'
)
new_user = User(
username=username,
email=email,
created=dt.now(),
bio="In West Philadelphia born and raised, \
on the playground is where I spent most of my days",
admin=False
)
db.session.add(new_user) # Adds new User record to database
db.session.commit() # Commits all changes
return make_response(f"{new_user} successfully created!")
else:
return render_template(
'users.html',
users=User.query.all(),
title=session.get("key", "Show Users")
)
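# Example requests against this endpoint (illustrative; host/port are assumptions):
#   GET /users?user=alice&email=alice@example.com  -> creates the record (or reports a duplicate)
#   GET /users                                     -> renders users.html with all users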
| 2.765625 | 3 |
debpackager/packages/conf/configurations.py | urban48/debpackager | 66 | 12788308 | <filename>debpackager/packages/conf/configurations.py
PACKAGE_ROOT = 'debian'
VERSION_REGEX = r'^v(\d\.\d\.\d$)'
STARTING_VERSION = '0.0.0'
VIRTUAL_ENV_PATH = 've'
| 1.359375 | 1 |
page/migrations/0002_content.py | kthaisse/website | 1 | 12788309 | # Generated by Django 2.2.10 on 2020-11-04 21:03
from django.db import migrations, models
import versatileimagefield.fields
class Migration(migrations.Migration):
dependencies = [("page", "0001_initial")]
operations = [
migrations.AddField(
model_name="page", name="in_menu", field=models.BooleanField(default=False)
),
migrations.AddField(
model_name="page",
name="picture",
field=versatileimagefield.fields.VersatileImageField(
blank=True, null=True, upload_to="page/page/", verbose_name="Image"
),
),
]
| 1.703125 | 2 |
main.py | iiithf/ias-device | 0 | 12788310 | <reponame>iiithf/ias-device
from http.server import BaseHTTPRequestHandler
import json


def parse_addr(addr):
i = addr.find(':')
host = '' if i<0 else addr[0:i]
port = int(addr if i<0 else addr[i+1:])
return (host, port)
class RequestHandler(BaseHTTPRequestHandler):
def body(self):
size = int(self.headers.get('Content-Length'))
return self.rfile.read(size)
def send(self, code, body=None, headers=None):
self.send_response(code)
for k, v in headers.items():
self.send_header(k, v)
self.end_headers()
if body is not None:
self.wfile.write(body)
def send_json(self, code, body):
heads = {'Content-Type': 'application/json'}
self.send(code, bytes(json.dumps(body), 'utf8'), heads)
def do_GET(self):
handler = self.server.handler
self.send_json(200, handler.addrs)
def do_POST(self):
handler = self.server.handler
if self.path.startswith('/service'):
return handler.handle_service(self)
return handler.handle_forward(self)
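
# Minimal wiring sketch (assumption: this demo block is not part of the original
# repo; the real project supplies its own handler object, and `_DemoHandler` is a
# hypothetical stand-in that only shows which attributes RequestHandler expects).
if __name__ == '__main__':
    from http.server import HTTPServer

    class _DemoHandler:
        addrs = []  # served back on GET /

        def handle_service(self, request):
            request.send_json(200, {'ok': True})

        def handle_forward(self, request):
            request.send_json(200, {'forwarded': True})

    server = HTTPServer(parse_addr(':8080'), RequestHandler)
    server.handler = _DemoHandler()  # read via self.server.handler in the handler methods
    server.serve_forever()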
| 2.453125 | 2 |
valipede/jsonschema.py | cooper-software/valipede | 0 | 12788311 | <gh_stars>0
from . import fields
try:
basestring
except NameError:
basestring = str
class JSONSchemaSerializer(object):
fallbacks = (
fields.Text, fields.Integer, fields.Float, fields.DateTime,
fields.Boolean, fields.Enum, fields.ListOf, fields.OneOf,
fields.Compound, fields.Anything
)
def serialize(self, schema, title=None):
props = {}
required_props = []
for k,v in schema.fields.items():
props[k] = self.get_property(v)
if v.required:
required_props.append(k)
definition = {
'properties': props
}
if required_props:
definition['required'] = required_props
if title:
definition['title'] = title
return definition
def get_property(self, field):
prop = {}
prop['default'] = field.default
prop['format'] = field.__class__.__name__
if field.label:
prop['title'] = field.label
if field.description:
prop['description'] = field.description
type_name = field.__class__.__name__
method_name = 'handle_%s' % type_name
if not hasattr(self, method_name):
method_name = None
for cls in self.fallbacks:
if isinstance(field, cls):
method_name = 'handle_%s' % cls.__name__
break
if not method_name:
raise ValueError("No handler for %s" % type_name)
getattr(self, method_name)(field, prop)
return prop
def handle_Text(self, field, prop):
prop['type'] = 'string'
if field.maxlength:
prop['maxLength'] = field.maxlength
if field.minlength:
prop['minLength'] = field.minlength
if field.regex:
prop['pattern'] = field.regex.pattern
def handle_Integer(self, field, prop):
prop['type'] = 'integer'
if field.min:
prop['minimum'] = field.min
if field.max:
prop['maximum'] = field.max
def handle_Float(self, field, prop):
self.handle_Integer(field, prop)
prop['type'] = 'number'
def handle_DateTime(self, field, prop):
prop['type'] = 'string'
prop['format'] = 'date-time'
def handle_Email(self, field, prop):
self.handle_Text(field, prop)
prop['format'] = 'email'
del prop['pattern']
def handle_URL(self, field, prop):
self.handle_Text(field, prop)
prop['format'] = 'uri'
del prop['pattern']
def handle_Boolean(self, field, prop):
prop['type'] = 'boolean'
def handle_Enum(self, field, prop):
prop['enum'] = field.values
def handle_ListOf(self, field, prop):
prop['type'] = 'array'
prop['items'] = self.get_property(field.field)
def handle_OneOf(self, field, prop):
prop['anyOf'] = list(map(self.get_property, field.fields))
def handle_Link(self, field, prop):
self.handle_Text(field, prop)
prop['format'] = 'Link'
prop['schema'] = '#/definitions/%s' % field.schema.__name__
def handle_Compound(self, field, prop):
prop['type'] = 'object'
properties = {}
required = []
for k, v in field.fields.items():
properties[k] = self.get_property(v)
if v.required:
required.append(k)
prop['properties'] = properties
if len(required) > 0:
prop['required'] = required
def handle_Anything(self, field, prop):
prop['anyOf'] = [
{'type':'array'},
{'type':'boolean'},
{'type':'null'},
{'type':'object'},
{'type':'string'},
{'type':'number'}
]
def handle_BoundingBox(self, field, prop):
prop['type'] = 'array'
prop['items'] = {
'type': 'float',
'minimum': -180.0,
'maximum': 180.0,
'maxItems': 4,
'minItems': 4
}
def handle_LatLng(self, field, prop):
prop['type'] = 'array'
prop['items'] = {
'type': 'float',
'minimum': -180.0,
'maximum': 180.0,
'maxItems': 2,
'minItems': 2
}
def handle_TypeOf(self, field, prop):
type = field.types[0]
if type == dict:
prop['type'] = 'object'
elif type == list:
prop['type'] = 'array'
elif len(field.types) == 2 and int in field.types and float in field.types:
prop['type'] = 'number'
elif type == int:
prop['type'] = 'integer'
elif type == float:
prop['type'] = 'float'
elif type == basestring:
prop['type'] = 'string'
def to_jsonschema(schema, title=None, serializer=JSONSchemaSerializer):
return serializer().serialize(schema, title=title)
def from_jsonschema(jsonschema):
pass
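# Example of the emitted structure (hypothetical schema with a single required
# Text field named "title" with maxlength=100), traced from serialize()/handle_Text():
#
#   to_jsonschema(schema, title='Article') ->
#   {
#       'title': 'Article',
#       'required': ['title'],
#       'properties': {
#           'title': {
#               'default': None,      # whatever field.default holds
#               'format': 'Text',
#               'type': 'string',
#               'maxLength': 100,
#           },
#       },
#   }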
| 2.421875 | 2 |
src/reports/migrations/0010_auto_20200701_1021.py | smsolima/report | 0 | 12788312 | <reponame>smsolima/report
# Generated by Django 3.0.7 on 2020-07-01 08:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('reports', '0009_auto_20200701_0957'),
]
operations = [
migrations.RemoveField(
model_name='dailyreport',
name='activity',
),
migrations.RemoveField(
model_name='dailyreport',
name='area',
),
migrations.RemoveField(
model_name='dailyreport',
name='cp',
),
migrations.RemoveField(
model_name='dailyreport',
name='description',
),
migrations.RemoveField(
model_name='dailyreport',
name='remarks',
),
migrations.RemoveField(
model_name='dailyreport',
name='tag',
),
migrations.CreateModel(
name='ReportActivity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True)),
('remarks', models.TextField(blank=True)),
('activity', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='reports.Activity')),
('area', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='reports.Area')),
('cp', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='reports.Cp')),
('reportnumber', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='reports.DailyReport')),
],
),
]
| 1.578125 | 2 |
app/engine/from_db/utils.py | publichealthengland/coronavirus-dashboard-api-v2-server | 51 | 12788313 | <reponame>publichealthengland/coronavirus-dashboard-api-v2-server<gh_stars>10-100
#!/usr/bin python3
# Imports
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Python:
from typing import Dict, Iterable
from tempfile import NamedTemporaryFile
from asyncio import Lock
# 3rd party:
from pandas import DataFrame
from orjson import dumps, loads
# Internal:
from app.exceptions import NotAvailable
from app.storage import AsyncStorageClient
from app.utils.operations import Request
from app.utils.assets import MetricData
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__all__ = [
'format_dtypes',
'format_data',
'format_response',
'cache_response'
]
async def cache_response(func, *, request: Request, **kwargs) -> bool:
kws = {
"container": "apiv2cache",
"path": request.path,
"compressed": False,
"cache_control": "max-age=90, s-maxage=300",
"content_type": request.content_type,
"content_disposition":
f'attachment; filename="{request.area_type}_{request.release:%Y-%m-%d}.'
f'{request.format if request.format != "xml" else "json"}"',
}
prefix, suffix, delimiter = b"", b"", b""
if request.format in ['json', 'xml']:
prefix, suffix, delimiter = b'{"body":[', b']}', b','
current_location = 0
async with AsyncStorageClient(**kws) as blob_client:
try:
# Create an empty blob
await blob_client.upload(b"")
await blob_client.set_tags({"done": "0", "in_progress": "1"})
with NamedTemporaryFile() as fp:
async with blob_client.lock_file(60) as blob_lock:
async for index, item in func(request=request, **kwargs):
async with Lock():
if not (index and current_location):
fp.write(prefix)
fp.write(item)
elif not index and current_location:
fp.seek(0)
tmp = item + fp.read()
fp.seek(0)
fp.truncate(0)
fp.write(tmp)
elif item:
fp.write(delimiter)
fp.write(item)
current_location = fp.tell()
# Renew the lease by after each
# iteration as some processes may
# take longer.
await blob_lock.renew()
async with Lock():
fp.write(suffix)
fp.seek(0)
# Anything below 40 bytes won't contain any
# data and won't be cached.
if fp.tell() == 40:
raise NotAvailable()
await blob_client.upload(fp.read())
tags = request.metric_tag
tags["done"] = "1"
tags["in_progress"] = "0"
await blob_client.set_tags(tags)
except Exception as err:
# Remove the blob on exception - data may be incomplete.
if await blob_client.exists():
await blob_client.delete()
raise err
# return responder
return True
def format_dtypes(df: DataFrame, column_types: Dict[str, object]) -> DataFrame:
json_columns = MetricData.json_dtypes.intersection(column_types)
# Replace `null` string with None. This happens because
# some DB queries convert `null` to `"null"` for type
# consistency.
df = df.replace('null', None)
df.loc[:, json_columns] = (
df
.loc[:, json_columns]
.apply(lambda column: column.map(loads))
)
return df.astype(column_types)
def format_data(df: DataFrame, response_metrics: Iterable[str]) -> DataFrame:
int_response_metrics = list(set(response_metrics).intersection(MetricData.integer_dtypes))
df.loc[:, int_response_metrics] = df.loc[:, int_response_metrics].astype(object)
# Pandas only supports `float` type for columns with `NaN`.
    # Convert the non-null cells of the int columns to `int`, so that they
    # won't be exported with a trailing `.0` in the response.
for col in int_response_metrics:
notnull = df[col].notnull()
df.loc[notnull, col] = df.loc[notnull, col].astype(int)
# Replace `NaN` with `None`. The former is exported as `NaN`
# in JSON/CSV and is invalid. The latter is exported as `null`
# in JSON and an empty field in CSV.
df = df.where(df.notnull(), None)
str_response_metrics = list(
set(response_metrics)
.intersection(MetricData.string_dtypes)
)
df.loc[:, str_response_metrics] = (
df
.loc[:, str_response_metrics]
.apply(lambda column: column.str.strip('"'))
)
return df
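# Why the object/int dance in format_data matters (illustrative, plain pandas
# behaviour): pd.Series([1, None]) is stored as float64, so it would serialise
# as "1.0"; casting the column to object and the non-null cells to int keeps
# "1" in the output, while missing cells become None -> null in JSON / empty in CSV.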
def format_response(df: DataFrame, response_type: str, request: Request,
include_header: bool = True) -> bytes:
if response_type == 'csv':
base_metrics = ["areaCode", "areaName", "areaType", "date"]
if request.area_type == "msoa":
base_metrics = [
"regionCode", "regionName", "UtlaCode", "UtlaName", "LtlaCode", "LtlaName",
*base_metrics
]
if not len(request.nested_metrics):
request_metrics = sorted(request.db_metrics)
metrics = [*base_metrics, *request_metrics]
else:
nested_metric = request.nested_metrics[0]
metrics = [*base_metrics, *MetricData.nested_struct[nested_metric]]
for metric in set(metrics) - set(df.columns):
df = df.assign(**{metric: None})
csv_response = (
df
.loc[:, metrics]
.to_csv(
float_format="%.1f",
date_format="iso",
index=False,
header=include_header
)
)
return csv_response.encode()
df_dict = df.to_dict(orient='records')
if response_type == 'jsonl':
df_jsonl_gen: list[bytes] = list(map(dumps, df_dict))
return bytes.join(b"\n", df_jsonl_gen) + b"\n"
json_response = dumps(df_dict)
# Remove brackets: for JSON response, leading and
# trailing brackets must be added later as a part
# of the streaming process.
return json_response[1:-1]
| 2.015625 | 2 |
genrl/deep/agents/ddpg/ddpg.py | infinitemugen/genrl | 0 | 12788314 | <gh_stars>0
import numpy as np
import torch
import torch.nn as nn
import torch.optim as opt
import gym
from copy import deepcopy
from ...common import (
ReplayBuffer,
get_model,
save_params,
load_params,
get_env_properties,
set_seeds,
venv,
)
from typing import Optional, Any, Tuple, Union, Dict
class DDPG:
"""
Deep Deterministic Policy Gradient algorithm (DDPG)
Paper: https://arxiv.org/abs/1509.02971
:param network_type: The deep neural network layer types ['mlp', 'cnn']
:param env: The environment to learn from
:param gamma: discount factor
:param replay_size: Replay memory size
:param batch_size: Update batch size
:param lr_p: learning rate for policy optimizer
:param lr_q: learning rate for value fn optimizer
:param polyak: polyak averaging weight for target network update
:param epochs: Number of epochs
:param start_steps: Number of exploratory steps at start
:param steps_per_epoch: Number of steps per epoch
:param noise_std: Standard deviation for action noise
:param max_ep_len: Maximum steps per episode
:param start_update: Number of steps before first parameter update
:param update_interval: Number of steps between parameter updates
:param save_interval: Number of steps between saves of models
:param layers: Number of neurons in hidden layers
:param tensorboard_log: the log location for tensorboard
:param seed: seed for torch and gym
:param render: if environment is to be rendered
:param device: device to use for tensor operations; ['cpu','cuda']
:param run_num: model run number if it has already been trained
:param save_model: model save directory
:param load_model: model loading path
:type network_type: string
:type env: Gym environment
:type gamma: float
:type replay_size: int
:type batch_size: int
:type lr_p: float
:type lr_q: float
:type polyak: float
:type epochs: int
:type start_steps: int
:type steps_per_epoch: int
:type noise_std: float
:type max_ep_len: int
:type start_update: int
:type update_interval: int
:type save_interval: int
:type layers: tuple
:type tensorboard_log: string
:type seed: int
:type render: bool
:type device: string
:type run_num: int
:type save_model: string
:type load_model: string
"""
def __init__(
self,
network_type: str,
env: Union[gym.Env, venv],
gamma: float = 0.99,
replay_size: int = 1000000,
batch_size: int = 100,
lr_p: float = 0.0001,
lr_q: float = 0.001,
polyak: float = 0.995,
epochs: int = 100,
start_steps: int = 10000,
steps_per_epoch: int = 4000,
noise: Optional[Any] = None,
noise_std: float = 0.1,
max_ep_len: int = 1000,
start_update: int = 1000,
update_interval: int = 50,
layers: Tuple = (32, 32),
tensorboard_log: str = None,
seed: Optional[int] = None,
render: bool = False,
device: Union[torch.device, str] = "cpu",
run_num: int = None,
save_model: str = None,
load_model: str = None,
save_interval: int = 5000,
):
self.network_type = network_type
self.env = env
self.gamma = gamma
self.replay_size = replay_size
self.batch_size = batch_size
self.lr_p = lr_p
self.lr_q = lr_q
self.polyak = polyak
self.epochs = epochs
self.start_steps = start_steps
self.steps_per_epoch = steps_per_epoch
self.noise = noise
self.noise_std = noise_std
self.max_ep_len = max_ep_len
self.start_update = start_update
self.update_interval = update_interval
self.save_interval = save_interval
self.layers = layers
self.tensorboard_log = tensorboard_log
self.seed = seed
self.render = render
self.run_num = run_num
self.save_model = save_model
self.load_model = load_model
self.save = save_params
self.load = load_params
# Assign device
if "cuda" in device and torch.cuda.is_available():
self.device = torch.device(device)
else:
self.device = torch.device("cpu")
# Assign seed
if seed is not None:
set_seeds(seed, self.env)
# Setup tensorboard writer
self.writer = None
if self.tensorboard_log is not None: # pragma: no cover
from torch.utils.tensorboard import SummaryWriter
self.writer = SummaryWriter(log_dir=self.tensorboard_log)
self.create_model()
def create_model(self) -> None:
"""
Initialize the model
Initializes optimizer and replay buffers as well.
"""
state_dim, action_dim, discrete, _ = get_env_properties(self.env)
if discrete:
raise Exception(
"Discrete Environments not supported for {}.".format(__class__.__name__)
)
if self.noise is not None:
self.noise = self.noise(
np.zeros_like(action_dim), self.noise_std * np.ones_like(action_dim)
)
self.ac = get_model("ac", self.network_type)(
state_dim, action_dim, self.layers, "Qsa", False
).to(self.device)
# load paramaters if already trained
if self.load_model is not None:
self.load(self)
self.ac.load_state_dict(self.checkpoint["weights"])
for key, item in self.checkpoint.items():
if key not in ["weights", "save_model"]:
setattr(self, key, item)
print("Loaded pretrained model")
self.ac_target = deepcopy(self.ac).to(self.device)
# freeze target network params
for param in self.ac_target.parameters():
param.requires_grad = False
self.replay_buffer = ReplayBuffer(self.replay_size)
self.optimizer_policy = opt.Adam(self.ac.actor.parameters(), lr=self.lr_p)
self.optimizer_q = opt.Adam(self.ac.critic.parameters(), lr=self.lr_q)
def select_action(
self, state: np.ndarray, deterministic: bool = True
) -> np.ndarray:
"""
Selection of action
:param state: Observation state
:param deterministic: Action selection type
:type state: int, float, ...
:type deterministic: bool
:returns: Action based on the state and epsilon value
:rtype: int, float, ...
"""
with torch.no_grad():
action, _ = self.ac.get_action(
torch.as_tensor(state, dtype=torch.float32).to(self.device),
deterministic=deterministic,
)
action = action.detach().cpu().numpy()
# add noise to output from policy network
if self.noise is not None:
action += self.noise()
return np.clip(
action, self.env.action_space.low[0], self.env.action_space.high[0]
)
def get_q_loss(
self,
state: np.ndarray,
action: np.ndarray,
reward: float,
next_state: np.ndarray,
done: bool,
) -> torch.Tensor:
"""
Computes loss for Q-Network
:param state: environment observation
:param action: agent action
        :param reward: environment reward
:param next_state: environment next observation
:param done: if episode is over
:type state: int, float, ...
:type action: float
        :type reward: float
:type next_state: int, float, ...
:type done: bool
:returns: the Q loss value
:rtype: float
"""
q = self.ac.critic.get_value(torch.cat([state, action], dim=-1))
with torch.no_grad():
q_pi_target = self.ac_target.get_value(
torch.cat(
[next_state, self.ac_target.get_action(next_state, True)[0]], dim=-1
)
)
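            # TD target: r + gamma * (1 - done) * Q_target(s', mu_target(s'))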
target = reward + self.gamma * (1 - done) * q_pi_target
return nn.MSELoss()(q, target)
def get_p_loss(self, state: np.ndarray) -> torch.Tensor:
"""
Computes policy loss
:param state: Environment observation
:type state: int, float, ...
:returns: Policy loss
:rtype: float
"""
q_pi = self.ac.get_value(
torch.cat([state, self.ac.get_action(state, True)[0]], dim=-1)
)
return -torch.mean(q_pi)
def update_params(
self,
state: np.ndarray,
action: np.ndarray,
reward: float,
next_state: np.ndarray,
done: bool,
) -> None:
"""
Takes the step for optimizer.
"""
self.optimizer_q.zero_grad()
loss_q = self.get_q_loss(state, action, reward, next_state, done)
loss_q.backward()
self.optimizer_q.step()
# freeze critic params for policy update
for param in self.ac.critic.parameters():
param.requires_grad = False
self.optimizer_policy.zero_grad()
loss_p = self.get_p_loss(state)
loss_p.backward()
self.optimizer_policy.step()
# unfreeze critic params
for param in self.ac.critic.parameters():
param.requires_grad = True
# update target network
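        # soft update: theta_target <- polyak * theta_target + (1 - polyak) * theta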
with torch.no_grad():
for param, param_target in zip(
self.ac.parameters(), self.ac_target.parameters()
):
param_target.data.mul_(self.polyak)
param_target.data.add_((1 - self.polyak) * param.data)
def learn(self) -> None: # pragma: no cover
state, episode_reward, episode_len, episode = self.env.reset(), 0, 0, 0
total_steps = self.steps_per_epoch * self.epochs
if self.noise is not None:
self.noise.reset()
for t in range(total_steps):
# execute single transition
if t > self.start_steps:
action = self.select_action(state, deterministic=True)
else:
action = self.env.action_space.sample()
next_state, reward, done, _ = self.env.step(action)
if self.render:
self.env.render()
episode_reward += reward
episode_len += 1
# don't set done to True if max_ep_len reached
done = False if episode_len == self.max_ep_len else done
self.replay_buffer.push((state, action, reward, next_state, done))
state = next_state
if done or (episode_len == self.max_ep_len):
if self.noise is not None:
self.noise.reset()
if episode % 20 == 0:
print(
"Episode: {}, Reward: {}, Timestep: {}".format(
episode, episode_reward, t
)
)
if self.tensorboard_log:
self.writer.add_scalar("episode_reward", episode_reward, t)
state, episode_reward, episode_len = self.env.reset(), 0, 0
episode += 1
# update params
if t >= self.start_update and t % self.update_interval == 0:
for _ in range(self.update_interval):
batch = self.replay_buffer.sample(self.batch_size)
                    states, actions, rewards, next_states, dones = (
                        x.to(self.device) for x in batch
                    )
                    self.update_params(states, actions, rewards, next_states, dones)
if self.save_model is not None:
if t >= self.start_update and t % self.save_interval == 0:
self.checkpoint = self.get_hyperparams()
self.save(self, t)
print("Saved current model")
self.env.close()
if self.tensorboard_log:
self.writer.close()
def get_hyperparams(self) -> Dict[str, Any]:
hyperparams = {
"network_type": self.network_type,
"gamma": self.gamma,
"batch_size": self.batch_size,
"replay_size": self.replay_size,
"polyak": self.polyak,
"noise_std": self.noise_std,
"lr_policy": self.lr_p,
"lr_value": self.lr_q,
"weights": self.ac.state_dict(),
}
return hyperparams
if __name__ == "__main__":
env = gym.make("Pendulum-v0")
algo = DDPG("mlp", env)
algo.learn()
    # algo.evaluate(algo)  # DDPG defines no evaluate() method in this module
| 2.171875 | 2 |
distributions/non_parametric.py | kitteltom/probabilistic-energy-forecasting | 2 | 12788315 | import numpy as np
from distributions.distribution import Distribution
class NonParametric(Distribution):
"""
Provides functions for a non-parametric forecast distribution.
"""
@staticmethod
def pdf(x, pdf_x, x_eval):
pass
@staticmethod
def cdf(x, cdf_x, x_eval):
"""
Computes the CDF of the non-parametric distribution at x given the CDF at evaluation points,
by linear interpolation.
"""
# Linear interpolation
insertion_points = np.searchsorted(x_eval, x)
r = np.minimum(insertion_points, len(x_eval) - 1)
l = np.maximum(0, insertion_points - 1)
idx = np.arange(len(x))
slope = (cdf_x[r, idx] - cdf_x[l, idx]) / np.maximum(x_eval[r] - x_eval[l], 1e-6)
return cdf_x[l, idx] + slope * (x - x_eval[l])
@staticmethod
def mean(pdf_x, x_eval):
"""
Computes the mean of the non-parametric distribution by integrating the PDF at evaluation points,
using the trapezoidal rule.
"""
return np.trapz(
y=x_eval[:, np.newaxis] * pdf_x,
x=x_eval[:, np.newaxis],
axis=0
)
@staticmethod
def var(pdf_x, x_eval):
"""
Computes the variance of the non-parametric distribution by integrating the PDF at evaluation points,
using the trapezoidal rule.
"""
return np.trapz(
y=x_eval[:, np.newaxis] ** 2 * pdf_x,
x=x_eval[:, np.newaxis],
axis=0
) - np.trapz(
y=x_eval[:, np.newaxis] * pdf_x,
x=x_eval[:, np.newaxis],
axis=0
) ** 2
@staticmethod
def percentile(p, cdf_x, x_eval):
"""
Computes the p-percentile of the non-parametric distribution given the CDF at evaluation points,
by linear interpolation.
"""
# Linear interpolation
insertion_points = []
for i in range(cdf_x.shape[1]):
insertion_points.append(np.searchsorted(cdf_x[:, i], p / 100))
insertion_points = np.array(insertion_points)
r = np.minimum(insertion_points, len(cdf_x) - 1)
l = np.maximum(0, insertion_points - 1)
idx = np.arange(cdf_x.shape[1])
slope = (x_eval[r] - x_eval[l]) / np.maximum(cdf_x[r, idx] - cdf_x[l, idx], 1e-6)
return x_eval[l] + slope * (p / 100 - cdf_x[l, idx])
@staticmethod
def crps(x, cdf_x, x_eval):
"""
Computes the Continuous Ranked Probability Score (CRPS) of the non-parametric distribution with true value x,
using the trapezoidal rule.
"""
return np.trapz(
y=(cdf_x - (x_eval[:, np.newaxis] >= x[np.newaxis, :])) ** 2,
x=x_eval[:, np.newaxis],
axis=0
)
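
# Minimal self-check sketch (assumption: this demo block is not part of the
# original module; it just exercises the static methods on a discretised
# standard normal, for which mean ~ 0 and variance ~ 1 are expected).
if __name__ == "__main__":
    x_eval = np.linspace(-6.0, 6.0, 1201)
    pdf = np.exp(-x_eval ** 2 / 2) / np.sqrt(2 * np.pi)
    pdf_x = pdf[:, np.newaxis]                    # shape (n_eval, n_series)
    print(NonParametric.mean(pdf_x, x_eval))      # ~ [0.]
    print(NonParametric.var(pdf_x, x_eval))       # ~ [1.]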
| 3.609375 | 4 |
examples/variational_autoencoder/variational_autoencoder_deconv_horovod.py | avolkov1/keras_experiments | 92 | 12788316 | <reponame>avolkov1/keras_experiments
'''This script demonstrates how to build a variational autoencoder
with Keras and deconvolution layers.
Reference: "Auto-Encoding Variational Bayes" https://arxiv.org/abs/1312.6114
Using Horovod.
original implementation:
https://github.com/fchollet/keras/blob/master/examples/variational_autoencoder_deconv.py
run:
mpirun -np 2 --map-by ppr:4:socket python \
variational_autoencoder_deconv_horovod.py --epochs=30
TMPDIR=/tmp mpirun --report-bindings --bind-to none --map-by slot -np 4 \
python ./examples/variational_autoencoder/variational_autoencoder_deconv_horovod.py \
--epochs=30 --nranks_per_gpu=1
# Below via singularity. NGPUS is GPUs per node
NNODES=1 NGPUS=1 RANKS_PER_GPU=2 && \
time TMPDIR=/tmp mpirun --report-bindings -mca btl_tcp_if_exclude docker0,lo \
--bind-to none --map-by slot -np $(($NNODES * $RANKS_PER_GPU * $NGPUS)) \
run_psgcluster_singularity.sh \
--container=/cm/shared/singularity/tf1.4.0_hvd_ompi3.0.0-2017-11-23-154091b4d08c.img \
--venvpy=~/.virtualenvs/py-keras-gen \
--scripts=./examples/variational_autoencoder/variational_autoencoder_deconv_horovod.py \
--epochs=4 \
--nranks_per_gpu=$RANKS_PER_GPU
NNODES=2 NGPUS=8 RANKS_PER_GPU=1 && \
time TMPDIR=/tmp mpirun --report-bindings -mca btl_tcp_if_exclude docker0,lo \
--bind-to none --map-by slot -np $(($NNODES * $RANKS_PER_GPU * $NGPUS)) \
run_psgcluster_singularity.sh \
--container=/cm/shared/singularity/tf17.12_tf1.4.0_hvd_ompi3.0.0_ibverbs-2018-02-01-5540d30e4dc5.img \
--venvpy=~/.virtualenvs/py-keras-gen \
--scripts=./examples/variational_autoencoder/variational_autoencoder_deconv_horovod.py \
--epochs=4 \
--nranks_per_gpu=$RANKS_PER_GPU
'''
import sys
import argparse as ap
import numpy as np
# try:
# import Tkinter # @UnusedImport
# import matplotlib.pyplot as plt
# except ImportError:
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
import matplotlib
try:
matplotlib.use('Agg')
except Exception:
raise
import matplotlib.pyplot as plt
from scipy.stats import norm
import tensorflow as tf
import horovod.tensorflow as hvd
import horovod.keras as hvd_keras
try:
# Initialize Horovod.
hvd.init()
except Exception:
raise
from keras import backend as K
from keras.datasets import mnist
from keras.optimizers import TFOptimizer
from keras_exp.callbacks.timing import BatchTiming, SamplesPerSec
from vae_common import CustomFormatter, make_vae_and_codec
def parser_(desc):
parser = ap.ArgumentParser(description=desc,
formatter_class=CustomFormatter)
parser.add_argument('--epochs', type=int, default=5,
help='Number of epochs to run training for.')
parser.add_argument(
'--nranks_per_gpu', type=int, default=1,
help='S|Number of ranks to run on each GPUs. Use this parameter to\n'
'oversubscribe a GPU. When oversubscribing a GPU use in combination\n'
'with MPS (multi-process service). Default: %(default)s')
args = parser.parse_args()
return args
def main(argv=None):
'''
'''
main.__doc__ = __doc__
argv = sys.argv if argv is None else sys.argv.extend(argv)
desc = main.__doc__ # .format(os.path.basename(__file__))
# CLI parser
args = parser_(desc)
nranks_per_gpu = args.nranks_per_gpu
local_rank = hvd.local_rank()
gpu_local_rank = local_rank // nranks_per_gpu
print('local_rank, GPU_LOCAL_RANK: {}, {}'.format(
local_rank, gpu_local_rank))
# Pin GPU to be used to process local rank (one GPU per process)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# config.gpu_options.visible_device_list = str(hvd.local_rank())
config.gpu_options.visible_device_list = str(gpu_local_rank)
K.set_session(tf.Session(config=config))
# input image dimensions
img_rows, img_cols, img_chns = 28, 28, 1
# number of convolutional filters to use
filters = 64
# convolution kernel size
num_conv = 3
hvdsize = hvd.size()
batch_size = 128 # 100
if K.image_data_format() == 'channels_first':
original_img_size = (img_chns, img_rows, img_cols)
else:
original_img_size = (img_rows, img_cols, img_chns)
latent_dim = 2
intermediate_dim = 128
epsilon_std = 1.0
epochs = args.epochs # 5
# train the VAE on MNIST digits
(x_train, _), (x_test, y_test) = mnist.load_data()
# Data split if going for reduction in each iteration step. Using
# tf-queue or dataset is better to preserve uniform random sampling.
# nsamples = x_train.shape[0]
# mysamples = nsamples // hvdsize
# start_sam = hvd.local_rank() * mysamples
# stop_sam = min((hvd.local_rank() + 1) * mysamples, nsamples)
# x_train = x_train[start_sam:stop_sam, ...]
x_train = x_train.astype('float32') / 255.
x_train = x_train.reshape((x_train.shape[0],) + original_img_size)
x_test = x_test.astype('float32') / 255.
x_test = x_test.reshape((x_test.shape[0],) + original_img_size)
if hvd.rank() == 0:
print('x_train.shape:', x_train.shape)
vae, encoder, generator = make_vae_and_codec(
original_img_size, img_chns, img_rows, img_cols, batch_size,
filters, num_conv, intermediate_dim, latent_dim, epsilon_std)
# : :type vae: Model
lr = 0.001 # * hvdsize
opt = tf.train.RMSPropOptimizer(lr)
# Add Horovod Distributed Optimizer.
opt = hvd.DistributedOptimizer(opt) # , use_locking=True)
opt = TFOptimizer(opt)
vae.compile(optimizer=opt, loss=None)
if hvd.rank() == 0:
vae.summary()
# callbacks = []
callbacks = [hvd_keras.callbacks.BroadcastGlobalVariablesCallback(0)]
if hvd.rank() == 0:
callbacks += [BatchTiming(), SamplesPerSec(batch_size * hvdsize)]
sess = K.get_session()
sess.run(hvd.broadcast_global_variables(0))
vae.fit(x_train,
shuffle=True,
epochs=epochs,
batch_size=batch_size,
verbose=hvd.local_rank() == 0,
validation_data=(x_test, None),
callbacks=callbacks)
if hvd.rank() == 0:
vae_val = vae
loss = vae_val.evaluate(x=x_test, y=None, batch_size=batch_size)
print('\n\nVAE VALIDATION LOSS: {}'.format(loss))
# display a 2D plot of the digit classes in the latent space
x_test_encoded = encoder.predict(x_test, batch_size=batch_size)
plt.figure(figsize=(6, 6))
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)
plt.colorbar()
# plt.show()
plt.savefig('vae_scatter.ps')
plt.close()
# display a 2D manifold of the digits
n = 15 # figure with 15x15 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# Linearly spaced coordinates on the unit square were transformed
# through the inverse CDF (ppf) of the Gaussian
# To produce values of the latent variables z, since the prior of the
# latent space is Gaussian
grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
grid_y = norm.ppf(np.linspace(0.05, 0.95, n))
for i, yi in enumerate(grid_x):
for j, xi in enumerate(grid_y):
z_sample = np.array([[xi, yi]])
z_sample = np.tile(z_sample, batch_size).reshape(batch_size, 2)
x_decoded = generator.predict(z_sample, batch_size=batch_size)
digit = x_decoded[0].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
plt.imshow(figure, cmap='Greys_r')
# plt.show()
plt.savefig('vae_digit.ps')
plt.close()
K.clear_session()
if __name__ == '__main__':
main()
| 2.703125 | 3 |
smallprox/core.py | alairock/small-prox | 17 | 12788317 | <filename>smallprox/core.py
import asyncio
import os
import logging
import re
import dns.resolver
logging.basicConfig()
from .server import HTTPServer
from .mapper import update_config, add_container
logger = logging.getLogger('small-prox')
NO_HTTPS_REDIRECT = os.getenv('NO_HTTPS_REDIRECT', 'false').lower() == 'true'
def _get_local_address():
# Pull the local address from the environment
addr = os.environ.get('LOCAL_ADDRESS')
if addr:
return addr
resolver = dns.resolver.Resolver()
    try:
        # if the Docker-provided host alias resolves, use it directly
        resolver.query('host.docker.internal')
        return 'host.docker.internal'
    except Exception:
        pass
# must be on linux, get host ip
result = os.popen('ip r').read()
    ip = re.match(r'default via (.*?)\s', result).group(1)
return ip
def _get_remote_mapping(port_mapping):
local_host, remote_host = port_mapping.split('=')
    return local_host + '=0', remote_host
def main():
config = {}
if os.getenv('DEBUG') == 'true':
logger.setLevel('DEBUG')
loop = asyncio.get_event_loop()
local_ports = os.getenv('LOCAL_PORTS', [])
local_ports = local_ports and [port.strip() for port in local_ports.split(',')]
remote_ports = os.getenv('REMOTE_PORTS', [])
remote_ports = remote_ports and [port.strip() for port in remote_ports.split(',')]
for port in remote_ports:
mapping, ip = _get_remote_mapping(port)
add_container(None, mapping, config, ip=ip)
if NO_HTTPS_REDIRECT:
config['_dont_ssl_redirect'] = True
config['_local_ports'] = local_ports
config['_local_address'] = _get_local_address()
logger.debug('Current container map: %s', config)
server = HTTPServer(loop, config)
loop.run_until_complete(server.start())
loop.create_task(update_config(config))
loop.run_forever()
| 2.125 | 2 |
task22.py | kazh98/pe2019 | 0 | 12788318 | <reponame>kazh98/pe2019
import math
class Triangle(object):
def __init__(self, a, b, C):
self.a = a
self.b = b
self.C = C * math.pi / 180
def getHeight(self):
return self.b * math.sin(self.C)
def getArea(self):
return (self.a * self.getHeight()) / 2
def getPerimeter(self):
return self.a + self.b + math.sqrt(self.a ** 2 + self.b ** 2 - 2 * self.a * self.b * math.cos(self.C))
a = [int(e) for e in input().split()]
tri = Triangle(a[0], a[1], a[2])
print(tri.getArea())
print(tri.getPerimeter())
print(tri.getHeight())
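# Worked check (illustrative): input "3 4 90" prints
#   area      = 3 * 4 / 2       = 6.0
#   perimeter = 3 + 4 + 5       = 12.0  (right triangle, third side = 5)
#   height    = 4 * sin(90 deg) = 4.0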
| 3.828125 | 4 |
utilities/execute_sql.py | Nunie123/transom | 0 | 12788319 | <filename>utilities/execute_sql.py<gh_stars>0
import sqlalchemy as sa
def execute_sql_string(connection_manager, raw_sql_string):
'''
This function can execute multiple SQL statements.
The output is a list of a list of tuples. e.g. [ [ (one, two), (three, four) ], [ (1,2), (3,4) ] ]
The first tuple in the inner list is the headers.
'''
sql_list = raw_sql_string.split(';')
results = []
    cleaned_sql_list = [sql.strip() for sql in sql_list if sql.strip()]  # drop empty fragments left by the final ';'
for sql_statement in cleaned_sql_list:
connection_manager.connect()
result = connection_manager.conn.execute(sql_statement)
headers = result.keys()
result_list = [tuple(headers)] + result.fetchall()
results.append(result_list)
connection_manager.close()
return results
def execute_sql_from_file(connection_manager, filepath):
with open(filepath, 'r') as sql_file:
raw_sql_string = sql_file.read()
result = execute_sql_string(connection_manager, raw_sql_string)
return result
def execute_stored_procedure(connection_manager, sp_name, sp_arguments_list=None):
sp_arguments = ', '.join(sp_arguments_list) if sp_arguments_list else ''
if connection_manager.db_type == 'mysql':
sql_string = f'call {sp_name}({sp_arguments})'
else:
raise AssertionError(f'Stored procedures not supported for database type {connection_manager.db_type}')
result = execute_sql_string(connection_manager, sql_string)
return result
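# Shape of the return value (illustrative, following the docstring above):
#   execute_sql_string(cm, "SELECT 1 AS a, 2 AS b;") -> [[('a', 'b'), (1, 2)]]
# i.e. one inner list per statement, whose first tuple holds the column names.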
| 3.328125 | 3 |
src/sentry/snuba/sessions.py | pierredup/sentry | 0 | 12788320 | <reponame>pierredup/sentry
from __future__ import absolute_import
import pytz
from datetime import datetime, timedelta
from sentry.utils.snuba import raw_query, parse_snuba_datetime
from sentry.utils.dates import to_timestamp, to_datetime
from sentry.snuba.dataset import Dataset
DATASET_BUCKET = 3600
def _convert_duration(val):
if val != val:
return None
return val / 1000.0
def _get_conditions_and_filter_keys(project_releases, environments):
conditions = [["release", "IN", list(x[1] for x in project_releases)]]
if environments is not None:
conditions.append(["environment", "IN", environments])
filter_keys = {"project_id": list(set(x[0] for x in project_releases))}
return conditions, filter_keys
def get_changed_project_release_model_adoptions(project_ids):
"""Returns the last 72 hours worth of releases."""
start = datetime.now(pytz.utc) - timedelta(days=3)
rv = []
    # Find all releases with adoption in the last 72 hours
for x in raw_query(
dataset=Dataset.Sessions,
selected_columns=["project_id", "release", "users"],
groupby=["release", "project_id"],
start=start,
filter_keys={"project_id": project_ids},
)["data"]:
rv.append((x["project_id"], x["release"]))
return rv
def get_oldest_health_data_for_releases(project_releases):
"""Returns the oldest health data we have observed in a release
in 90 days. This is used for backfilling.
"""
conditions = [["release", "IN", [x[1] for x in project_releases]]]
filter_keys = {"project_id": [x[0] for x in project_releases]}
rows = raw_query(
dataset=Dataset.Sessions,
selected_columns=[["min", ["started"], "oldest"], "project_id", "release"],
groupby=["release", "project_id"],
start=datetime.utcnow() - timedelta(days=90),
conditions=conditions,
filter_keys=filter_keys,
)["data"]
rv = {}
for row in rows:
rv[row["project_id"], row["release"]] = row["oldest"]
return rv
def check_has_health_data(project_releases):
conditions = [["release", "IN", list(x[1] for x in project_releases)]]
filter_keys = {"project_id": list(set(x[0] for x in project_releases))}
return set(
(x["project_id"], x["release"])
for x in raw_query(
dataset=Dataset.Sessions,
selected_columns=["release", "project_id"],
groupby=["release", "project_id"],
start=datetime.utcnow() - timedelta(days=90),
conditions=conditions,
filter_keys=filter_keys,
)["data"]
)
def get_project_releases_by_stability(
project_ids, offset, limit, scope, stats_period=None, environments=None
):
"""Given some project IDs returns adoption rates that should be updated
on the postgres tables.
"""
if stats_period is None:
stats_period = "24h"
# Special rule that we support sorting by the last 24h only.
if scope.endswith("_24h"):
scope = scope[:-4]
stats_period = "24h"
_, stats_start, _ = get_rollup_starts_and_buckets(stats_period)
orderby = {
"crash_free_sessions": [["divide", ["sessions_crashed", "sessions"]]],
"crash_free_users": [["divide", ["users_crashed", "users"]]],
"sessions": ["-sessions"],
"users": ["-users"],
}[scope]
conditions = []
if environments is not None:
conditions.append(["environment", "IN", environments])
filter_keys = {"project_id": project_ids}
rv = []
for x in raw_query(
dataset=Dataset.Sessions,
selected_columns=["project_id", "release"],
groupby=["release", "project_id"],
orderby=orderby,
start=stats_start,
offset=offset,
limit=limit,
conditions=conditions,
filter_keys=filter_keys,
)["data"]:
rv.append((x["project_id"], x["release"]))
return rv
def _make_stats(start, rollup, buckets, default=0):
rv = []
start = int(to_timestamp(start) // rollup + 1) * rollup
for x in range(buckets):
rv.append([start, default])
start += rollup
return rv
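# _make_stats aligns buckets to the rollup grid: with rollup=3600 and buckets=2,
# a start of 10:30 UTC yields [[ts(11:00), 0], [ts(12:00), 0]].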
STATS_PERIODS = {
"1h": (3600, 1),
"24h": (3600, 24),
"1d": (3600, 24),
"48h": (3600, 48),
"2d": (3600, 48),
"7d": (86400, 7),
"14d": (86400, 14),
"30d": (86400, 30),
"90d": (259200, 30),
}
def get_rollup_starts_and_buckets(period):
if period is None:
return None, None, None
if period not in STATS_PERIODS:
raise TypeError("Invalid stats period")
seconds, buckets = STATS_PERIODS[period]
start = datetime.now(pytz.utc) - timedelta(seconds=seconds * buckets)
return seconds, start, buckets
def get_release_adoption(project_releases, environments=None, now=None):
"""Get the adoption of the last 24 hours (or a difference reference timestamp)."""
conditions, filter_keys = _get_conditions_and_filter_keys(project_releases, environments)
if now is None:
now = datetime.now(pytz.utc)
start = now - timedelta(days=1)
total_conditions = []
if environments is not None:
total_conditions.append(["environment", "IN", environments])
total_users = {}
for x in raw_query(
dataset=Dataset.Sessions,
selected_columns=["project_id", "users"],
groupby=["project_id"],
start=start,
conditions=total_conditions,
filter_keys=filter_keys,
)["data"]:
total_users[x["project_id"]] = x["users"]
rv = {}
for x in raw_query(
dataset=Dataset.Sessions,
selected_columns=["release", "project_id", "users", "sessions"],
groupby=["release", "project_id"],
start=start,
conditions=conditions,
filter_keys=filter_keys,
)["data"]:
total = total_users.get(x["project_id"])
if not total:
adoption = None
else:
adoption = float(x["users"]) / total * 100
rv[x["project_id"], x["release"]] = {
"adoption": adoption,
"users_24h": x["users"],
"sessions_24h": x["sessions"],
}
return rv
def get_release_health_data_overview(
project_releases,
environments=None,
summary_stats_period=None,
health_stats_period=None,
stat=None,
):
"""Checks quickly for which of the given project releases we have
health data available. The argument is a tuple of `(project_id, release_name)`
tuples. The return value is a set of all the project releases that have health
data.
"""
if stat is None:
stat = "sessions"
assert stat in ("sessions", "users")
_, summary_start, _ = get_rollup_starts_and_buckets(summary_stats_period or "24h")
conditions, filter_keys = _get_conditions_and_filter_keys(project_releases, environments)
stats_rollup, stats_start, stats_buckets = get_rollup_starts_and_buckets(health_stats_period)
missing_releases = set(project_releases)
rv = {}
for x in raw_query(
dataset=Dataset.Sessions,
selected_columns=[
"release",
"project_id",
"duration_quantiles",
"users",
"sessions",
"sessions_errored",
"sessions_crashed",
"users_crashed",
],
groupby=["release", "project_id"],
start=summary_start,
conditions=conditions,
filter_keys=filter_keys,
)["data"]:
rp = {
"duration_p50": _convert_duration(x["duration_quantiles"][0]),
"duration_p90": _convert_duration(x["duration_quantiles"][1]),
"crash_free_users": (
100 - x["users_crashed"] / float(x["users"]) * 100 if x["users"] else None
),
"crash_free_sessions": (
100 - x["sessions_crashed"] / float(x["sessions"]) * 100 if x["sessions"] else None
),
"total_users": x["users"],
"total_sessions": x["sessions"],
"sessions_crashed": x["sessions_crashed"],
"sessions_errored": x["sessions_errored"],
"has_health_data": True,
}
if health_stats_period:
rp["stats"] = {
health_stats_period: _make_stats(stats_start, stats_rollup, stats_buckets)
}
rv[x["project_id"], x["release"]] = rp
missing_releases.discard((x["project_id"], x["release"]))
# Add releases without data points
if missing_releases:
        # If we're already looking at a 90 day horizon we don't need to
        # fire another query, we can already assume there is no data.
if summary_stats_period != "90d":
has_health_data = check_has_health_data(missing_releases)
else:
has_health_data = ()
for key in missing_releases:
rv[key] = {
"duration_p50": None,
"duration_p90": None,
"crash_free_users": None,
"crash_free_sessions": None,
"total_users": 0,
"total_sessions": 0,
"sessions_crashed": 0,
"sessions_errored": 0,
"has_health_data": key in has_health_data,
}
if health_stats_period:
rv[key]["stats"] = {
health_stats_period: _make_stats(stats_start, stats_rollup, stats_buckets)
}
# Fill in release adoption
release_adoption = get_release_adoption(project_releases, environments)
for key in rv:
adoption_info = release_adoption.get(key) or {}
rv[key]["adoption"] = adoption_info.get("adoption")
rv[key]["total_users_24h"] = adoption_info.get("users_24h")
rv[key]["total_sessions_24h"] = adoption_info.get("sessions_24h")
if health_stats_period:
for x in raw_query(
dataset=Dataset.Sessions,
selected_columns=["release", "project_id", "bucketed_started", stat],
groupby=["release", "project_id", "bucketed_started"],
rollup=stats_rollup,
start=stats_start,
conditions=conditions,
filter_keys=filter_keys,
)["data"]:
time_bucket = int(
(parse_snuba_datetime(x["bucketed_started"]) - stats_start).total_seconds()
/ stats_rollup
)
rv[x["project_id"], x["release"]]["stats"][health_stats_period][time_bucket][1] = x[
stat
]
return rv
def get_crash_free_breakdown(project_id, release, start, environments=None):
filter_keys = {"project_id": [project_id]}
conditions = [["release", "=", release]]
if environments is not None:
conditions.append(["environment", "IN", environments])
now = datetime.now(pytz.utc)
def _query_stats(end):
row = raw_query(
dataset=Dataset.Sessions,
selected_columns=["users", "users_crashed", "sessions", "sessions_crashed"],
end=end,
start=start,
conditions=conditions,
filter_keys=filter_keys,
)["data"][0]
return {
"date": end,
"total_users": row["users"],
"crash_free_users": 100 - row["users_crashed"] / float(row["users"]) * 100
if row["users"]
else None,
"total_sessions": row["sessions"],
"crash_free_sessions": 100 - row["sessions_crashed"] / float(row["sessions"]) * 100
if row["sessions"]
else None,
}
last = None
rv = []
for offset in (
timedelta(days=1),
timedelta(days=2),
timedelta(days=7),
timedelta(days=14),
timedelta(days=30),
):
item_start = start + offset
if item_start > now:
if last is None or (item_start - last).days > 1:
rv.append(_query_stats(now))
break
rv.append(_query_stats(item_start))
last = item_start
return rv
def get_project_release_stats(project_id, release, stat, rollup, start, end, environments=None):
assert stat in ("users", "sessions")
# since snuba end queries are exclusive of the time and we're bucketing to
# a full hour, we need to round to the next hour since snuba is exclusive
# on the end.
end = to_datetime((to_timestamp(end) // DATASET_BUCKET + 1) * DATASET_BUCKET)
filter_keys = {"project_id": [project_id]}
conditions = [["release", "=", release]]
if environments is not None:
conditions.append(["environment", "IN", environments])
buckets = int((end - start).total_seconds() / rollup)
stats = _make_stats(start, rollup, buckets, default=None)
totals = {stat: 0, stat + "_crashed": 0, stat + "_abnormal": 0, stat + "_errored": 0}
for rv in raw_query(
dataset=Dataset.Sessions,
selected_columns=[
"bucketed_started",
stat,
stat + "_crashed",
stat + "_abnormal",
stat + "_errored",
"duration_quantiles",
],
groupby=["bucketed_started"],
start=start,
end=end,
rollup=rollup,
conditions=conditions,
filter_keys=filter_keys,
)["data"]:
ts = parse_snuba_datetime(rv["bucketed_started"])
bucket = int((ts - start).total_seconds() / rollup)
stats[bucket][1] = {
stat: rv[stat],
stat + "_crashed": rv[stat + "_crashed"],
stat + "_abnormal": rv[stat + "_abnormal"],
stat + "_errored": rv[stat + "_errored"] - rv[stat + "_crashed"],
"duration_p50": _convert_duration(rv["duration_quantiles"][0]),
"duration_p90": _convert_duration(rv["duration_quantiles"][1]),
}
# Session stats we can sum up directly without another query
# as the data becomes available.
if stat == "sessions":
for k in totals:
totals[k] += rv[k]
for idx, bucket in enumerate(stats):
if bucket[1] is None:
stats[idx][1] = {
stat: 0,
stat + "_crashed": 0,
stat + "_abnormal": 0,
stat + "_errored": 0,
"duration_p50": None,
"duration_p90": None,
}
# For users we need a secondary query over the entire time range
if stat == "users":
rows = raw_query(
dataset=Dataset.Sessions,
selected_columns=["users", "users_crashed", "users_abnormal", "users_errored"],
start=start,
end=end,
conditions=conditions,
filter_keys=filter_keys,
)["data"]
if rows:
rv = rows[0]
totals = {
"users": rv["users"],
"users_crashed": rv["users_crashed"],
"users_abnormal": rv["users_abnormal"],
"users_errored": rv["users_errored"] - rv["users_crashed"],
}
return stats, totals
| 2.484375 | 2 |
nanome_matryx/menus/CreationsMenu.py | nanome-ai/plugin-matryx | 0 | 12788321 | <filename>nanome_matryx/menus/CreationsMenu.py<gh_stars>0
from functools import partial
import nanome
import utils
from nanome.util import Logs
class CreationsMenu():
def __init__(self, plugin, fth_menu, on_close):
self._plugin = plugin
menu = nanome.ui.Menu.io.from_json('menus/json/my_creations.json')
menu.register_closed_callback(on_close)
self._menu = menu
self._creation_menu = nanome.ui.Menu.io.from_json('menus/json/creation.json')
self._creation_menu.register_closed_callback(on_close)
menu.root.find_node('First To Hash').add_child(fth_menu._menu.root)
self._commit_list = menu.root.find_node('Commit List').get_content()
self._fth_menu = fth_menu
self._fth_menu.use_as_component()
self._prefab_commit_item = menu.root.find_node('Prefab Commit Item')
def populate_my_creations(self):
commits = self._plugin._cortex.get_commits(self._plugin._account.address)
self._commit_list.items = []
for commit in commits:
clone = self._prefab_commit_item.clone()
clone.find_node('Label').get_content().text_value = 'Commit ' + commit['hash'][2:10]
btn = clone.find_node('View').get_content()
btn.register_pressed_callback(partial(self.open_creation_menu, commit['hash']))
btn = clone.find_node('Submit').get_content()
callback = partial(self._plugin._menu_tournament.open_submit_menu, commit['hash'])
btn.register_pressed_callback(callback)
self._commit_list.items.append(clone)
def open_my_creations(self, button):
self.populate_my_creations()
self._plugin.open_menu(self._menu)
def open_create_submission(self, button):
self.populate_my_creations()
self._fth_menu.display_selected(True, button)
self._plugin.open_menu(self._menu)
def open_creation_menu(self, commit_hash, button):
commit = self._plugin._cortex.get_commit(commit_hash)
# TODO: Why is this crashing?
address = self._creation_menu.root.find_node('Address').get_content()
address.text_value = 'Commit ' + commit['hash'][2:10]
author = self._creation_menu.root.find_node('Author').get_content()
author.text_value = 'by ' + utils.short_address(commit['owner'])
submit_time = self._creation_menu.root.find_node('Time').get_content()
submit_time.text_value = utils.timestamp_to_date(commit['timestamp'])
child_list = self._creation_menu.root.find_node('Children List').get_content()
child_list.items = []
btn_view_files = self._creation_menu.root.find_node('View Files').get_content()
callback = partial(self._plugin._menu_files.load_files, commit['ipfsContent'])
btn_view_files.register_pressed_callback(callback)
commit['children'] = [commit] * 10
for child in commit['children']:
item = nanome.ui.LayoutNode()
btn = item.add_new_button()
btn.set_all_text(child['hash'][2:10])
btn.register_pressed_callback(partial(self.open_creation_menu, child['hash']))
child_list.items.append(item)
self._plugin.open_menu(self._creation_menu) | 2.078125 | 2 |
self-learning/based/00000023.py | vladspirin/python-learning | 1 | 12788322 | # from collections import ChainMap
# food_types = {'Vegetables': 15, 'Dairy': 20, 'Meat': 3, 'Cereals': 9, 'Fruits': 11, 'Fish': 7}
# countries = {'USA': 25, 'Australia': 15, 'Canada': 15, 'France': 6, 'India': 4}
# discount = {'gold': 20, 'regular': 10}
# chain = ChainMap(food_types, countries)
# food_types['Sweets'] = 10
# # some missing lines
# countries['USA'] = 35
# chain = chain.new_child(discount)
# print(chain)
# def range_sum(numbers, start, end):
# return sum([x for x in numbers if start <= x <= end])
# input_numbers = [int(i) for i in input().split()]
# a, b = map(int, input().split())
# print(range_sum(input_numbers, a, b))
# passwords = input().split()
# # passwords = ['<PASSWORD>', '<PASSWORD>', '<PASSWORD>', '<PASSWORD>', '<PASSWORD>']
# passwords.sort(key=len)
# for i in passwords:
# print(i, len(i))
from datetime import datetime
def get_weekday(datetime_obj):
d = datetime.strptime(datetime_obj, "%Y-%m-%d")
return d.strftime("%A")
print(get_weekday('2019-12-31'))
# JetBrains Academy solution
# def get_weekday(datetime_obj):
# return datetime_obj.strftime("%A")
def get_release_date(release_str):
s = release_str.replace("Day of release: ", "")
release = datetime.strptime(s, "%d %B %Y")
return release.strftime("%Y-%m-%d %H:%M:%S")
print(get_release_date("Day of release: 4 July 2019")) | 3.421875 | 3 |
flask/test/patches.py | spaudanjo/boxtribute | 0 | 12788323 | from functools import wraps
from unittest.mock import patch
from auth import get_user_token_string
def mock_decorator(f):
"""Fake decorator for mocking other decorators."""
@wraps(f)
def decorated_function(*args, **kwargs):
return f(*args, **kwargs)
return decorated_function
def mock_auth_test(test_for, **kwargs):
"""Fake auth function for testing"""
return True
def mock_function_that_does_nothing(var):
return
get_auth_string_patch = patch(
"boxwise_flask.auth_helper.get_auth_string_from_header", get_user_token_string
)
requires_auth_patch = patch("boxwise_flask.auth_helper.requires_auth", mock_decorator)
authorization_test_patch = patch(
"boxwise_flask.auth_helper.authorization_test", mock_auth_test
)
add_user_to_request_context_patch = patch(
"boxwise_flask.auth_helper.add_user_to_request_context",
mock_function_that_does_nothing,
)
| 2.90625 | 3 |
vnpy/api/lhang/__init__.py | firekay/vnpy | 5 | 12788324 | # encoding: UTF-8
from vnlhang import LhangApi | 0.960938 | 1 |
python/fuzzy_classification/classifiers/RandomFuzzyTree.old.py | oljubuncic1/fuzzy-classification | 0 | 12788325 | import numpy as np
from math import log, sqrt, ceil
import random
import string
from copy import copy
import pyximport
from tabulate import tabulate
pyximport.install()
from ..util import math_functions
import matplotlib.pyplot as plt
import textwrap
from textwrap import dedent
from multiprocessing import Pool
from multiprocessing.pool import ThreadPool
from joblib import Parallel, delayed
class FuzzyNode:
feature = None
    is_terminal = None
    classification = None
# __slots__ = ['is_terminal',
# 'classification',
# 'feature'
# 'partitioning']
class FuzzyPartitioning:
def __init__(self):
self.partitions = []
self.gain = None
# __slots__ = ['partitions', 'gain']
class FuzzyPartition:
__slots__ = ['f', 'node', 'properties', 'ranges']
class FuzzySetProperties:
__slots__ = ['cardinality',
'entropy',
'data',
'memberships']
# noinspection PyAttributeOutsideInit,PyPropertyAccess,PyUnresolvedReferences
class RandomFuzzyTree:
def __init__(self,
n_jobs=1,
p="sqrt",
terminal_n_threshold=10,
categorical_features=[],
a_cut = 0.5,
test_generation_file=None,
test_indendation_level=1):
self.test_generation_file = test_generation_file
self.test_cases_generated = 0
self.n_jobs = n_jobs
self.p = p
self.is_fit = False
self.terminal_n_threshold = terminal_n_threshold
self.categorical_features = categorical_features
self.a_cut = a_cut
self.test_indentation_level = test_indendation_level
def fit(self, data, ranges, copy_data=False, classes=(1, 2)):
self.classes = classes
if copy_data:
data = np.copy(data)
self.ranges = ranges
self.n_feature = self.count_features(data)
if self.p == "sqrt":
self.p = ceil(sqrt(self.n_feature))
elif self.p == "log":
self.p = ceil(log(self.n_feature, 2))
elif self.p == "all":
self.p = self.n_feature
tree = self.build_tree(data, np.array([1.0 for d in data]))
self.root = tree
self.is_fit = True
def predict(self, x):
assert self.is_fit
memberships = self.predict_memberships(x)
return max(memberships)
def predict_memberships(self, x):
memberships = dict([(c, 0) for c in self.classes])
self.forward_pass(memberships, x, self.root)
return memberships
def score(self, data):
correct = 0
for x in data:
if self.predict(x[:-1]) == x[-1]:
correct += 1
return correct / data.shape[0]
def build_tree(self, data, memberships, lvl=0, ranges=None):
        if ranges is None:
ranges = self.ranges
# print("\t\t Bulting tree lvl %d" % (lvl + 1) )
regular_features = self.get_regular_features(data)
if len(regular_features) != 0:
node = self.select_partitioning(data, memberships, regular_features, ranges)
else:
node = self.generate_leaf(data, memberships)
if node.is_terminal or self.is_terminal(node, data, memberships):
node.is_terminal = True
node.classification = self.classification(data, memberships)
else:
for p in node.partitioning.partitions:
next_ranges = copy(ranges)
next_ranges[node.feature] = p.ranges[node.feature]
p.node = self.build_tree(p.properties.data,
p.properties.memberships,
lvl + 1,
next_ranges)
return node
def generate_leaf(self, data, memberships):
node = FuzzyNode()
node.is_terminal = True
node.classification = self.classification(data, memberships)
return node
def select_partitioning(self, data, memberships, regular_features, ranges):
node = FuzzyNode()
features = np.random.choice(regular_features,
min(self.p, len(regular_features)),
replace=False)
feature_partitionings = {}
for feature in features:
feature_partitionings[feature] = \
self.best_partitioning(feature, data, memberships, ranges)
node.feature = max(feature_partitionings,
key=lambda x: feature_partitionings[x].gain)
node.partitioning = feature_partitionings[node.feature]
node.partitioning.gain = self._fuzzy_entropy(data, memberships)# + node.partitioning.gain
return node
def get_regular_features(self, data):
regular_features = []
for i in range(len(self.ranges)):
curr_range = self.ranges[i]
inds = np.logical_and(data[:, i] != curr_range[0], data[:, i] != curr_range[1]).nonzero()[0]
if curr_range[0] != curr_range[1] and inds.shape[0] != 0:
regular_features.append(i)
return regular_features
def is_terminal(self, node, data, memberships):
if memberships.shape[0] == 0:
return True
empty_partitions = 0
for partition in node.partitioning.partitions:
if partition.properties.memberships.shape[0] <= 1:
empty_partitions += 1
if empty_partitions >= 2:
return True
data_classes = data[:, -1]
all_same = True
for i in range(1, data_classes.shape[0]):
if int(data_classes[i]) != int(data_classes[0]):
all_same = False
break
if all_same:
return True
if abs(node.partitioning.gain) <= 0.000001:
return True
else:
return False
def forward_pass(self,
result_memberships,
x,
node,
membership=1):
if node.is_terminal:
for c in self.classes:
result_memberships[c] += node.classification[c] * membership
else:
for partition in node.partitioning.partitions:
next_membership = membership * partition.f(x[node.feature])
next_node = partition.node
self.forward_pass(result_memberships,
x,
next_node,
next_membership)
@staticmethod
def count_features(data):
return data.shape[1] - 1
def classification(self, data, memberships):
classification_val = {}
for c in self.classes:
inds = (data[:, -1] == c).nonzero()[0]
classification_val[c] = np.sum(memberships[inds])
return classification_val
def best_partitioning(self, feature, data, memberships, ranges):
if feature in self.categorical_features:
max_partitioning = FuzzyPartitioning()
max_category = int(self.ranges[feature][1])
min_category = int(self.ranges[feature][0])
for category in range(min_category, max_category + 1):
partition = FuzzyPartition()
partition.properties = FuzzySetProperties()
def f(x):
if int(x) == category:
return 1
else:
return 0
partition.f = f
inds = (data[:, feature] == category).nonzero()[0]
partition.properties.data = data[inds, :]
max_partitioning.partitions.append(partition)
self.set_properties(max_partitioning.partitions,
data,
feature,
memberships)
max_partitioning.gain = \
self.gain(max_partitioning.partitions, memberships)
else:
points = np.unique(data[:, feature])
L, U = self.ranges[feature]
point_partitionings = {}
regular_point_occured = False
last_point = None
meaningful_length = (U - L) / 10
for p in points:
if last_point is None or p - last_point > meaningful_length:
if p != L and p != U:
curr_partitioning = self.partitioning(data, feature, p, memberships, ranges)
if self.count_zero(curr_partitioning) < 2:
regular_point_occured = True
point_partitionings[p] = \
curr_partitioning
last_point = p
if not regular_point_occured:
midpoint = L + (U - L) / 2
max_partitioning = self.partitioning(data,
feature,
midpoint,
memberships,
ranges)
max_partitioning.midpoint = midpoint
else:
max_partitioning_key = max(point_partitionings,
key=lambda x: point_partitionings[x].gain)
max_partitioning = point_partitionings[max_partitioning_key]
max_partitioning.midpoint = max_partitioning_key
self.print_partitioning(max_partitioning, data, feature, ranges)
return max_partitioning
def count_zero(self, partitioning):
cnt = 0
for part in partitioning.partitions:
if part.properties.entropy == 0:
cnt += 1
return cnt
def partitioning(self, data, feature, p, memberships, ranges):
part = FuzzyPartitioning()
L, U = self.ranges[feature]
W_left = 2 * (p - L)
W_middle_left = (p - L)
W_middle_right = (U - p)
W_right = 2 * (U - p)
# TODO: generalize to more
left_partition = FuzzyPartition()
left_partition.f = math_functions.triangular(L,
W_left)
left_partition.ranges = copy(ranges)
left_partition.ranges[feature] = L, p
left_partition.properties = []
middle_partition = FuzzyPartition()
middle_partition.f = \
math_functions.composite_triangular(p,
W_middle_left,
W_middle_right)
middle_partition.ranges = copy(ranges)
middle_partition.ranges[feature] = L, U
middle_partition.properties = []
right_partition = FuzzyPartition()
right_partition.f = math_functions.triangular(U,
W_right)
right_partition.ranges = copy(ranges)
right_partition.ranges[feature] = p, U
right_partition.properties = []
part.partitions = [left_partition,
middle_partition,
right_partition]
self.set_properties(part.partitions, data, feature, memberships)
part.gain = self.gain(part.partitions, memberships)
return part
def print_partitioning(self, partitioning, data, feature, ranges):
rng = ranges[feature]
data = data[data[:, feature].argsort()]
data_table = []
for d in data:
data_arr = [d[-1]]
for partition in partitioning.partitions:
data_arr.append(round(partition.f(d[feature]), 2))
data_table.append(data_arr)
print(tabulate(data_table,
headers=['Class', 'First', 'Second', 'Third'],
tablefmt='orgtbl'))
for partition in partitioning.partitions:
partition_sums = {}
for d in data:
for c in self.classes:
if d[-1] in partition_sums:
if partition.f(d[feature]) >= 0.5:
partition_sums[d[-1]] += partition.f(d[feature])
else:
partition_sums[d[-1]] = 0
print(partition_sums)
print("Gain: ", partitioning.gain)
xs = np.arange(rng[0], rng[1], 0.05).tolist()
for partition in partitioning.partitions:
ys = []
for x in xs:
ys.append(partition.f(x))
plt.plot(xs, ys, color="g")
xs = []
ys = []
zs = []
for d in data:
xs.append(d[feature])
ys.append(0.5)
zs.append(d[-1])
plt.scatter(xs, ys, c=zs)
plt.show()
def set_properties(self, partitions, data, feature, memberships):
for partition in partitions:
prop = self._fuzzy_set_properties(data,
feature,
partition,
memberships)
partition.properties = prop
def gain(self, partitions, memberships):
data_cardinality = np.sum(memberships)
if len(partitions) == 0:
raise ValueError("Empty partitions")
properties = [part.properties for part in partitions]
gain_value = 0
for prop in properties:
gain_value -= (prop.cardinality / data_cardinality) * prop.entropy
return gain_value
def _fuzzy_set_properties(self, data, feature, partition, memberships):
if data.shape.__contains__(0):
raise ValueError("Empty array")
membership_f = np.vectorize(partition.f)
data_at_feature = np.copy(data[:, feature])
set_memberships = membership_f(data_at_feature)
set_memberships = np.multiply(memberships, set_memberships)
non_zero_inds = (set_memberships >= self.a_cut).nonzero()[0]
set_memberships = set_memberships[non_zero_inds]
set_data = data[non_zero_inds, :]
cardinality = np.sum(set_memberships)
entropy = self._fuzzy_entropy(set_data,
set_memberships,
cardinality)
properties = FuzzySetProperties()
properties.cardinality = cardinality
properties.entropy = entropy
non_zero_inds = (set_memberships >= self.a_cut).nonzero()[0]
set_data = data[non_zero_inds, :]
set_memberships = set_memberships[non_zero_inds]
properties.data = set_data
properties.memberships = set_memberships
return properties
def _fuzzy_entropy(self, data, memberships, cardinality=None):
if self.should_generate_tests(data):
self.generate_fuzzy_entropy_test(data,
memberships,
cardinality)
if data.shape.__contains__(0):
return 0
# raise ValueError("Empty array")
entropy = 0
if cardinality is None:
cardinality = np.sum(memberships)
if cardinality != 0:
for c in self.classes:
inds = (data[:, -1] == c).nonzero()[0]
memberships_at_inds = memberships[inds]
proba = np.sum(memberships_at_inds) / cardinality
if proba != 0:
entropy -= proba * log(proba, 2)
return entropy
def should_generate_tests(self, data):
return self.test_generation_file is not None and \
20 < data.shape[0] < 50 and \
self.test_cases_generated < 3
def generate_fuzzy_entropy_test(self, data, memberships, cardinality):
self.test_cases_generated += 1
test_cases_file = open(self.test_generation_file, "a")
print("\t\tGenerating tests")
data = data[:, (-2, -1)].tolist()
memberships = memberships.tolist()
indentation = [" " for i in range(self.test_indentation_level)]
indentation = "".join(indentation)
print("", file=test_cases_file)
test_id = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5))
print("%sdef testFuzzyEntropy_generated_%s(self):" % (indentation, test_id), file=test_cases_file)
wrapper = textwrap.TextWrapper(initial_indent="%s " % indentation, width=80,
subsequent_indent=' ' * 24)
data_str = "data = np.array(%s)" % (data)
print(wrapper.fill(data_str), file=test_cases_file)
memberships_str = "memberships= np.array(%s)" % (memberships)
print(wrapper.fill(memberships_str), file=test_cases_file)
print("%s cardinality = %s" % (indentation, cardinality), file=test_cases_file)
result = "self.tree._fuzzy_entropy(data, memberships, cardinality)"
print("%s self.assertAlmostEqual(%s, 0, 2)" % (indentation, result), file=test_cases_file)
print("", file=test_cases_file)
test_cases_file.close()
def __str__(self):
raise NotImplementedError()
| 2.09375 | 2 |
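# Editor's sketch (added): rough intended usage of the tree above; `data` is assumed to
# be a numpy array whose last column holds the class label, and `ranges` gives the
# (min, max) pair for every feature column. Names and shapes are illustrative only.
# tree = RandomFuzzyTree(p="sqrt", a_cut=0.5)
# tree.fit(data, ranges=[(0, 1)] * (data.shape[1] - 1), classes=(1, 2))
# print(tree.score(test_data))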
uri/1040_media_3.py | thekilian/Python-pratica | 0 | 12788326 | '''
Read four numbers (N1, N2, N3, N4), each with one decimal place, corresponding to a student's four grades. Compute the average of these grades using the weights 2, 3, 4 and 1, respectively, and print it preceded by the message "Media: ". If this average is greater than or equal to 7.0, print the message "Aluno aprovado.". If the computed average is below 5.0, print the message "Aluno reprovado.". If the computed average lies between 5.0 and 6.9, inclusive, the program must print the message "Aluno em exame.".
If the student has to take the exam, read a value corresponding to the exam grade obtained by the student. Then print the message "Nota do exame: " followed by the grade entered. Recompute the average (add the exam score to the previously computed average and divide by 2) and print the message "Aluno aprovado." (if the final average is 5.0 or more) or "Aluno reprovado." (if the average ended up 4.9 or less). For these two cases (approved or failed after taking the exam), print on the last line the message "Media final: " followed by the student's final average.
**Input**
The input contains four floating-point numbers corresponding to the student's grades.
**Output**
All answers must be printed with one decimal place. The messages must be printed exactly as described in the problem statement. Do not forget to print a newline at the end of each line, otherwise you will get "Presentation Error".
| Input Sample | Output Samples |
| --------------- | ------------------------ |
| 2.0 4.0 7.5 8.0 | Media: 5.4 |
| 6.4 | Aluno em exame. |
| | Nota do exame: 6.4 |
| | Aluno aprovado. |
| | Media final: 5.9 |
| 2.0 6.5 4.0 9.0 | Media: 4.8 |
| | Aluno reprovado. |
| 9.0 4.0 8.5 9.0 | Media: 7.3 |
| | Aluno aprovado. |
'''
'''
# <NAME>
Mp = [(N1 x P1) + (N2 x P2) + (N3 x P3) + ... (Nx x Px)] ÷ (P1 + P2 + P3 + ... Px)
Where:
- Mp is the weighted average (the result you want to find)
- N is each value in the set
- P is the weight corresponding to each value in the set.
'''
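# Worked example of the formula above using the first sample input (2.0 4.0 7.5 8.0):
# Mp = (2.0*2 + 4.0*3 + 7.5*4 + 8.0*1) / (2 + 3 + 4 + 1) = 54.0 / 10 = 5.4,
# which matches the "Media: 5.4" line in the sample table.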
notas = input().split()
n1 = float(notas[0])
n2 = float(notas[1])
n3 = float(notas[2])
n4 = float(notas[3])
# another way to unpack the input values:
# n1, n2, n3, n4 = notas
p1 = 2
p2 = 3
p3 = 4
p4 = 1
media = ((n1 * p1) + (n2 * p2) + (n3 * p3) + (n4 * p4)) / (p1 + p2 + p3 + p4)
print("Media: {:.1f}".format(media))
if media >= 7.0:
print("Aluno aprovado.")
elif media < 5.0:
print("Aluno reprovado.")
elif media >= 5.0 and media <= 6.9:
print("Aluno em exame.")
exame = float(input())
print("Nota do exame: {:.1f}".format(exame))
new_media = (exame + media) / 2
if new_media >= 5.0:
print("Aluno aprovado.")
print("Media final: {:.1f}".format(new_media))
elif new_media <= 4.9:
print("Aluno reprovado.")
print("Media final: {:.1f}".format(new_media)) | 3.4375 | 3 |
ctrl/user_ctrl.py | k4t0mono/hashtag_analysis | 0 | 12788327 | <reponame>k4t0mono/hashtag_analysis
from models import User
from get_tweets import session, logger
class User_Ctrl():
def new_user(self, user_):
return User(
id=user_.id,
screen_name=user_.screen_name,
created_at=user_.created_at,
)
def add_user(self, user):
try:
session.add(user)
session.commit()
except Exception as e:
logger.debug(e)
session.rollback()
else:
logger.info("Added user {}:{}".format(user.id, user.screen_name))
def add_users(self, user_list):
for user in user_list:
if self.get_user(user.id):
continue
session.add(user)
session.commit()
def get_user(self, id_):
try:
return session.query(User).filter(User.id == id_).one()
except:
return None | 2.578125 | 3 |
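# Editor's sketch (added): typical flow, assuming `api` is an authenticated tweepy
# client; new_user() maps the fetched profile onto the User model and add_user()
# persists it through the module-level session. The tweepy call is hypothetical.
# ctrl = User_Ctrl()
# tw_user = api.get_user("k4t0mono")
# if ctrl.get_user(tw_user.id) is None:
#     ctrl.add_user(ctrl.new_user(tw_user))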
reader/management/commands/train_data.py | AymaneZizi/dailyreader | 0 | 12788328 | from reader.models import *
from django.core.management.base import BaseCommand, CommandError
import os
from topia.termextract import extract
import naive_bayes_classifier
from constants.app_constants import *
def get_pcategory(category_name,pcategory_objects):
other_category = None
category = None
for element in pcategory_objects:
if element.name == category_name:
category= element
if element.name== "OTHER_KEY":
other_category=element
if category:
return category
else:
return other_category
class Command(BaseCommand):
def handle(self,*args, **options):
lines_documents=[]
# delete features, pcategory and articles
Features.objects.all().delete()
PCategory.objects.all().delete()
Article.objects.all().delete()
with open(os.getcwd()+"/reader/management/commands/"+"training_data.csv","r") as file:
lines=file.readlines()
list_training_data=[]
for line in lines:
data=line.rsplit(",",1)
document=naive_bayes_classifier.Document()
document.text=data[0]
document.category_name=data[1]
list_training_data.append(document)
training = naive_bayes_classifier.TrainingData(list_training_data)
training.train_data()
dict_categories = training.get_all_categories()
dict_features = training.get_all_features()
listCategoriesModel=[0]*NO_OF_PCATEGORY
for key in dict_categories:
pcategory=PCategory()
pcategory.name=dict_categories[key].name
listCategoriesModel[dict_categories[key].index]=pcategory
listFeaturesModel=[]
for key in dict_features:
for key_category in dict_categories:
category=dict_categories[key_category]
feature=Features()
feature.name=key
index = category.index
feature.pcategory= listCategoriesModel[index]
category_name=feature.pcategory.name
#print category_name
if str(category_name) in dict_features[key].dict_category_probability:
feature.probability=dict_features[key].dict_category_probability[str(category_name)]
else:
feature.probability=0
if not feature.pcategory:
print index
print listCategoriesModel[index]
listFeaturesModel.append(feature)
PCategory.objects.bulk_create(listCategoriesModel)
pcategory_objects = PCategory.objects.all()
for element in listFeaturesModel:
category_name = element.pcategory.name
#print category_name
pcategory = get_pcategory(category_name,pcategory_objects )
element.pcategory=pcategory
pcategory=PCategory()
pcategory.name=OTHER_KEY
pcategory.save()
Features.objects.bulk_create(listFeaturesModel)
| 2.1875 | 2 |
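# Editor's note (added): as a Django management command stored in
# reader/management/commands/train_data.py, this is run with
#   python manage.py train_data
# which clears Features, PCategory and Article and rebuilds them from training_data.csv.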
Preprocessing Scripts/createS-4Split.py | Zero-Shot/Zero-Shot-Learning | 24 | 12788329 | <reponame>Zero-Shot/Zero-Shot-Learning
import csv
import sys
import warnings
import numpy as np
def get_number_of_words(txt_file):
with open(txt_file) as file:
return sum(1 for _ in file)
def get_number_of_columns(csv_file):
with open(csv_file) as file:
reader = csv.reader(file, delimiter=',', skipinitialspace=True)
return len(next(reader))
def create_alphabet_dictionary(csv_file):
alphabet_dict = dict()
with open(csv_file) as file:
reader = csv.reader(file, delimiter=',', skipinitialspace=True)
for index, line in enumerate(reader):
alphabet_dict[line[0]] = index
return alphabet_dict
def write_s_file(write_file, matrix, words):
with open(write_file, "w+") as file:
for row_number, row in enumerate(matrix):
file.write(words[row_number] + "," + ",".join(np.char.mod('%f', row)) + "\n")
def half_string(string):
print(string)
half_1 = string[: int(len(string) / 2)]
half_2 = string[int(len(string) / 2):]
return half_1, half_2
def quarter_string(string):
print(string)
first_half, second_half = half_string(string)
print(first_half)
one, two = half_string(first_half)
three, four = half_string(second_half)
return [one, two, three, four]
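# Editor's example (added): quarter_string splits a word into four parts by halving it
# twice, e.g. quarter_string("elephant") returns ["el", "ep", "ha", "nt"]
# (the print() calls above are debug output).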
def __main__():
if len(sys.argv) < 4:
exit("Not enough arguments given, needs alphabet csv, label/word txt and output s matrix")
alphabet_csv = sys.argv[1]
word_txt = sys.argv[2]
s_matrix_csv = sys.argv[3]
number_of_words = get_number_of_words(word_txt)
alphabet_dict = create_alphabet_dictionary(alphabet_csv)
csv_num_cols = get_number_of_columns(alphabet_csv)
numpy_csv = np.genfromtxt(alphabet_csv, dtype=float, delimiter=",", filling_values=1)
# First column is letter count, then 4 times the features
s_matrix = np.zeros((number_of_words, csv_num_cols * 4 - 3))
word_list = []
with open(word_txt, "r") as file:
for word_index, line in enumerate(file):
split_line = line.split(maxsplit=1)
class_index = split_line[0]
word = split_line[1].rstrip()
word_list.append(word)
numpy_word = None
word_parts = quarter_string(word)
for part_index, part in enumerate(word_parts):
# 0,12 - 12, 23 - 23, 34 - 34,45
numpy_start = 1
if part_index == 0:
start_column = 1
end_column = csv_num_cols
numpy_start = 0
elif part_index == 1:
start_column = csv_num_cols
end_column = (csv_num_cols * 2) - 1
elif part_index == 2:
start_column = (csv_num_cols * 2) - 1
end_column = (csv_num_cols * 3) - 2
elif part_index == 3:
start_column = (csv_num_cols * 3) - 2
end_column = (csv_num_cols * 4) - 3
print(start_column, end_column)
for letter in part:
                    if letter == '\n':
continue
try:
letter_index = alphabet_dict[letter]
print(len(s_matrix[word_index][start_column:end_column]),
len(numpy_csv[letter_index][numpy_start:]))
s_matrix[word_index][start_column:end_column] += numpy_csv[letter_index][1:]
s_matrix[word_index][0] += 1
                    except KeyError:
warnings.warn("Key '%s' not found in dictionary" % letter)
divider = s_matrix[word_index][0]
s_matrix[word_index][0] = 1 / divider
for col_index in range(1, csv_num_cols * 4 - 3):
s_matrix[word_index][col_index] = s_matrix[word_index][col_index] / divider
write_s_file(s_matrix_csv, s_matrix, word_list)
| 3.140625 | 3 |
utils.py | Li357/instrument-classifier | 0 | 12788330 | <gh_stars>0
from math import ceil
def print_progressbar(progress, msg=''):
print('\r{0}: [{1:50s}] {2:.1f}%'.format(msg, '#' * int(ceil(progress * 50)), progress * 100),
end='',
flush=True)
| 2.78125 | 3 |
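# Editor's example (added): minimal usage of the helper above.
if __name__ == '__main__':
    import time
    for i in range(101):
        print_progressbar(i / 100, msg='Loading samples')
        time.sleep(0.01)
    print()  # finish with a newline once the bar is full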
py/soa/token.py | tslnc04/soa | 0 | 12788331 | <reponame>tslnc04/soa
"""
Copyright 2017 <NAME>
token.py contains the code for token types and some basic character tests
"""
ERROR = 0
EOF = 1
EOL = 2
REGISTER = 3
INT = 4
SET = 5
OUT = 6
ADD = 7
EXIT = 8
IF = 9
FI = 10
TOKEN_NAMES = [
"error",
"eof",
"eol",
"register",
"int",
"set",
"out",
"add",
"exit",
"if",
"fi"
]
def is_eol(char):
"is_eol tests if a character denotes an end of line"
return char == "\n" or char == "\r"
def is_digit(char):
"is_digit tests is a given character is a numerical digit"
return (ord(char) in [(ord("0") + x) for x in range(10)] or ord(char) == ord("-"))
def print_tokens(tokens):
"print_tokens is a prettier way of printing the tokens array"
for token in tokens:
print("POS", token["Pos"], "TYP", token["Typ"], "VAL", token["Val"])
| 3.078125 | 3 |
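# Editor's example (added): tokens are plain dicts with "Pos", "Typ" and "Val" keys,
# so a small stream for `set r1 5` can be printed like this:
if __name__ == "__main__":
    print_tokens([
        {"Pos": 0, "Typ": SET, "Val": "set"},
        {"Pos": 4, "Typ": REGISTER, "Val": "r1"},
        {"Pos": 7, "Typ": INT, "Val": "5"},
        {"Pos": 8, "Typ": EOF, "Val": ""},
    ])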
examples/wsecho.py | VladimirKuzmin/werkzeug | 4,200 | 12788332 | <reponame>VladimirKuzmin/werkzeug
"""Shows how you can implement a simple WebSocket echo server using the
wsproto library.
"""
from werkzeug.exceptions import InternalServerError
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request
from werkzeug.wrappers import Response
from wsproto import ConnectionType
from wsproto import WSConnection
from wsproto.events import AcceptConnection
from wsproto.events import CloseConnection
from wsproto.events import Message
from wsproto.events import Ping
from wsproto.events import Request as WSRequest
from wsproto.events import TextMessage
from wsproto.frame_protocol import CloseReason
@Request.application
def websocket(request):
# The underlying socket must be provided by the server. Gunicorn and
# Werkzeug's dev server are known to support this.
stream = request.environ.get("werkzeug.socket")
if stream is None:
stream = request.environ.get("gunicorn.socket")
if stream is None:
raise InternalServerError()
# Initialize the wsproto connection. Need to recreate the request
# data that was read by the WSGI server already.
ws = WSConnection(ConnectionType.SERVER)
in_data = b"GET %s HTTP/1.1\r\n" % request.path.encode("utf8")
for header, value in request.headers.items():
in_data += f"{header}: {value}\r\n".encode()
in_data += b"\r\n"
ws.receive_data(in_data)
running = True
while True:
out_data = b""
for event in ws.events():
if isinstance(event, WSRequest):
out_data += ws.send(AcceptConnection())
elif isinstance(event, CloseConnection):
out_data += ws.send(event.response())
running = False
elif isinstance(event, Ping):
out_data += ws.send(event.response())
elif isinstance(event, TextMessage):
# echo the incoming message back to the client
if event.data == "quit":
out_data += ws.send(
CloseConnection(CloseReason.NORMAL_CLOSURE, "bye")
)
running = False
else:
out_data += ws.send(Message(data=event.data))
if out_data:
stream.send(out_data)
if not running:
break
in_data = stream.recv(4096)
ws.receive_data(in_data)
# The connection will be closed at this point, but WSGI still
# requires a response.
return Response("", status=204)
if __name__ == "__main__":
run_simple("localhost", 5000, websocket)
| 2.734375 | 3 |
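# Editor's sketch (added, not part of the original example): a matching client using the
# third-party `websockets` package; start the server above first, then run this in a
# separate process.
# import asyncio
# import websockets
#
# async def main():
#     async with websockets.connect("ws://localhost:5000/") as ws:
#         await ws.send("hello")
#         print(await ws.recv())   # the server echoes "hello" back
#         await ws.send("quit")    # asks the server to close the connection
#
# asyncio.run(main())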
scripts/mkdiff.py | charlie45000/corunners-example | 0 | 12788333 | #! /usr/bin/env python3
import argparse
from pathlib import Path
import json
import sys
from scriptutil import calc
C0_OFF = "Task: C0, Corunner: OFF"
C0_ON = "Task: C0, Corunner: ON"
C1_OFF = "Task: C1, Corunner: OFF"
C1_ON = "Task: C1, Corunner: ON"
def getopts(argv):
parser = argparse.ArgumentParser()
parser.add_argument("file1", type=Path)
parser.add_argument("file2", type=Path)
return parser.parse_args(argv[1:])
def gen_stats(data):
text = r"""
\begin{tabular}{ |c|r|r|r||r|r|r| }\hline
& \multicolumn{3}{c||}{\textbf{Core 1}} & \multicolumn{3}{c|}{\textbf{Core 2}} \\\hline
\textbf{EA} & \textbf{max(a)} \textit{(ms)} & \textbf{max(b)} \textit{(ms)} & %
$\bm{R(a, b)}$ \textit{(\%)}& %
\textbf{max(c)} \textit{(ms)} & \textbf{max(d)} \textit{(ms)} & %
$\bm{R(c, d)}$ \textit{(\%)} \\\hline
"""
for ea, info in sorted(data.items()):
values = {
C0_OFF: 0.0,
C0_ON: 0.0,
C1_OFF: 0.0,
C1_ON: 0.0,
}
for value, sample in zip(info["values"], info["sample"]):
assert sample in values, f"Unknown sample {sample}"
values[sample] = max(values[sample], value)
r0 = calc(values[C0_OFF], values[C0_ON])
r1 = calc(values[C1_OFF], values[C1_ON])
text += f"${ea}$ & "
text += f"{values[C0_OFF]:.3f} & {values[C0_ON]:.3f} & "
if r0 > 0.01:
text += r'\textbf{' + f"{r0:.3f} " + r'}'
else:
text += f"{r0:.3f}"
text += ' & '
text += f"{values[C1_OFF]:.3f} & {values[C1_ON]:.3f} &"
if r1 > 0.01:
text += r'\textbf{' + f"{r1:.3f} " + r'} '
else:
text += f"{r1:.3f}"
text += ' \\\\\n'
text += r"""\hline
\end{tabular}
"""
print(text)
def main(argv):
args = getopts(argv)
with open(args.file1, "r") as inp:
d1 = json.load(inp)
with open(args.file2, "r") as inp:
d2 = json.load(inp)
def collect_values(info):
values = {
C0_OFF: 0.0,
C0_ON: 0.0,
C1_OFF: 0.0,
C1_ON: 0.0,
}
for value, sample in zip(info["values"], info["sample"]):
assert sample in values, f"Unknown sample {sample}"
values[sample] = max(values[sample], value)
return values
text = r"""
\begin{tabular}{ |c|r|r|r||r|r|r| }\hline
& \multicolumn{3}{c||}{\textbf{Core 1}} & \multicolumn{3}{c|}{\textbf{Core 2}} \\\hline
\textbf{EA} & $\Delta_{max(a)}$ \textit{(ms)} & $\Delta_{max(b)}$ \textit{(ms)} & %
$\Delta_{R(a, b)}$ \textit{(\%)}& %
$\Delta_{max(c)}$ \textit{(ms)} & $\Delta_{max(d)}$ \textit{(ms)} & %
$\Delta_{R(c, d)}$ \textit{(\%)} \\\hline
"""
for ea in sorted(d1):
info1 = d1[ea]
info2 = d2[ea]
vals1 = collect_values(info1)
vals2 = collect_values(info2)
r0_1 = calc(vals1[C0_OFF], vals1[C0_ON])
r0_2 = calc(vals2[C0_OFF], vals2[C0_ON])
r1_1 = calc(vals1[C1_OFF], vals1[C1_ON])
r1_2 = calc(vals2[C1_OFF], vals2[C1_ON])
text += f"${ea}$ & "
text += f"{vals1[C0_OFF]-vals2[C0_OFF]:+.3f} & {vals1[C0_ON]-vals2[C0_ON]:+.3f} & "
text += f"{r0_1-r0_2:+.3f} & "
text += f"{vals1[C1_OFF]-vals2[C1_OFF]:+.3f} & {vals1[C1_ON]-vals2[C1_ON]:+.3f} & "
text += f"{r1_1-r1_2:+.3f}"
text += ' \\\\\n'
text += r"""\hline
\end{tabular}
"""
print(text)
if __name__ == "__main__":
main(sys.argv)
| 2.46875 | 2 |
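# Editor's note (added): typical invocation, matching the argparse setup above; the
# script prints a LaTeX delta table comparing the two result files, so redirect stdout:
#   python mkdiff.py results_a.json results_b.json > diff_table.tex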
backend/eaws/ergonomics_struct.py | ramp-eu/Pose_Recognition_Correction | 0 | 12788334 | <gh_stars>0
import copy
import time
ergonomics_template = {
'body_angle': {
'type': 'Float'
},
'upper_limbs_angle': {
'type': 'Float'
},
'lower_limbs_angle': {
'type': 'Float'
},
'pose_1': {
'type': 'Float'
},
'pose_3': {
'type': 'Float'
},
'pose_4': {
'type': 'Float'
},
'pose_5': {
'type': 'Float'
},
'pose_6': {
'type': 'Float'
},
'pose_7': {
'type': 'Float'
},
'pose_9': {
'type': 'Float'
},
'pose_10': {
'type': 'Float'
},
'pose_11': {
'type': 'Float'
},
'pose_12': {
'type': 'Float'
},
'pose_13': {
'type': 'Float'
},
'pose_14': {
'type': 'Float'
},
'pose_15': {
'type': 'Float'
},
'eaws_score': {
'type': 'Float'
},
'time': {
'type': 'Integer'
},
'session': {
'type': 'String'
}
}
def get_ergonomics_skeleton():
return {
'body_angle': 0.0,
'upper_limbs_angle': 0.0,
'lower_limbs_angle': 0.0,
'pose_1': 0.0,
'pose_3': 0.0,
'pose_4': 0.0,
'pose_5': 0.0,
'pose_6': 0.0,
'pose_7': 0.0,
'pose_9': 0.0,
'pose_10': 0.0,
'pose_11': 0.0,
'pose_12': 0.0,
'pose_13': 0.0,
'pose_14': 0.0,
'pose_15': 0.0,
'eaws_score': 0.0,
'time': round(time.time() * 1000),
'session': ''
}
def build_struct(data):
    # deep-copy so repeated calls do not mutate the shared module-level template
    res = copy.deepcopy(ergonomics_template)
    for k, v in data.items():
        res[k]['value'] = v
    return res
| 1.820313 | 2 |
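# Editor's example (added): build_struct() takes a plain key/value dict (for instance
# the skeleton above) and wraps every value in the {'type': ..., 'value': ...} template.
if __name__ == '__main__':
    reading = get_ergonomics_skeleton()
    reading['eaws_score'] = 12.5
    reading['session'] = 'demo-session'
    payload = build_struct(reading)
    print(payload['eaws_score'])  # {'type': 'Float', 'value': 12.5}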
tools/coverage_merge/merge.py | edcote/fc4sc | 30 | 12788335 | <reponame>edcote/fc4sc
"""
/******************************************************************************
Copyright 2003-2018 AMIQ Consulting s.r.l.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/
/******************************************************************************
Original Authors: <NAME> and <NAME>,
AMIQ Consulting s.r.l. (<EMAIL>)
Date: 2018-Oct-07
******************************************************************************/
"""
import os
import sys
import xml.etree.ElementTree as ET
import argparse
from fnmatch import fnmatch
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from ucis_parser import UCIS_DB_Parser
"""
Merging steps:
1) Parse covergroup types: for each "instanceCoverages" element:
if this element does not exist in the mergeDBtree:
add this element to the mergeDBtree directly under the root element
else: goto step 2
Note: same covergroup type condition: equality of the 'moduleName' attribute
2) Parse covergroup instances: for each "covergroupCoverage/cgInstance" element:
if this element does not exist in the "covergroupCoverage" element of mergeDBtree:
add this element under the "covergroupCoverage" element
else: goto step 3
Note: same covergroup instance condition: equality of the 'name' attribute
3) Parse coverpoints: for each "coverpoint" element:
if this element does not exist in the "cgInstance" element of mergeDBtree:
raise exception: coverpoint not present in the mergeDBtree!
else: goto step 4
Note: same coverpoint condition: equality of the 'name' attribute
4) Parse bins: for each "bin" element:
if this element does not exist in the "coverpoint" element of mergeDBtree:
add this element under the "coverpoint" element
else: goto step 5
Note: same bin condition: equality of the 'name' attribute
5) Sum the bin ranges' hit counts: for each "range" element:
if this element does not exist in the "bin" element:
raise exception: bin is different than expected!
else:
add to the coverageCount
Note: same range condition: equality of the 'name' attribute
Note2: if 2 bins have the same name, but of different from, to or type attribute values => error
6) Parse crosses: for each "cross" element:
if this element does not exist in the "cgInstance" element of mergeDBtree:
raise exception: cross not present in the mergeDBtree!
else: goto step 7
Note: same cross condition: equality of the 'name' attribute
7) Parse crosses bins: for each "crossBin" element:
if this element does not exist in the "cross" element of mergeDBtree:
add this element under the "cross" element
else: goto step 8
Note: same crossBin condition: the list of index elements have the same value, in the
same order
"""
class UCIS_DB_Merger(UCIS_DB_Parser):
def pre_write_operations(self):
"""
FIXME: Known problems
1) Some Coverpoint bins can be present in one covergroup instance, but not in another.
        In other words, different covergroup instances of the same type can have bins which
might not be present in a certain coverpoint. This creates a problem when merging crosses,
as the current implementation does NOT account for this case!
2) Top level UCIS element attributes of the resulted merged DB have to be updated!
====================
parentId="200"
logicalName="string"
physicalName="string"
kind="string"
testStatus="true"
simtime="1.051732E7"
timeunit="string"
runCwd="string"
cpuTime="1.051732E7"
seed="string"
cmd="string"
args="string"
compulsory="string"
date="2004-02-14T19:44:14"
userName="string"
cost="1000.00"
toolCategory="string"
ucisVersion="string"
vendorId="string"
vendorTool="string"
vendorToolVersion="string"
sameTests="42"
comment="string"
====================
3) parse the resulted DB and change the "UCIS ID" attributes to be unique
can use an ElementTree tree walker for this task!
"""
pass
def write_merged_db(self, merged_db_path):
self.pre_write_operations()
self.mergeDBtree.write(file_or_filename = merged_db_path,
encoding = "UTF-8",
xml_declaration = True)
def process_xml(self, filename):
parseTree = ET.parse(filename)
parseRoot = parseTree.getroot()
tagstr = parseRoot.tag[-len("UCIS"):]
if tagstr != "UCIS":
return False
if self.mergeDBtree is None:
print("First UCIS XML found set as base DB:\n\t{0}\n".format(filename))
self.mergeDBtree = parseTree
self.mergeDBroot = self.mergeDBtree.getroot()
else:
print("Found XML file: {0}".format(filename))
# TODO: update exceptions to be more verbose in function parseXML
# Needed information:
# Context: full path to element which produces error on parsing
# Info: error description
# Source XML files where the element(s) is/are found
# TODO: surround by try-catch and handle thrown exceptions
self.parse_xml(parseRoot)
return True
def parse_xml(self, parseRoot):
""" Parse covergroup types """
for instanceCoverages in self.findall_ucis_children(parseRoot, "instanceCoverages"):
cgTypeNameAttrib = 'moduleName'
cgTypeName = instanceCoverages.get(cgTypeNameAttrib)
xpath_query = ".//" + self.format_et_query("instanceCoverages", cgTypeNameAttrib, cgTypeName)
xpath_query += "/" + self.format_et_query("covergroupCoverage")
# search the same element in the resulted merged database
searchElement = self.find_merge_element_by_query(xpath_query)
print("Parsing covergroup type: {0}".format(cgTypeName))
if searchElement is not None:
covergroupCoverage = self.find_ucis_element(instanceCoverages, "covergroupCoverage")
self.parse_covergroup_type(covergroupCoverage, xpath_query)
print("\n")
else:
print("Found new coverage type [{0}]".format(cgTypeName))
mergeParent = self.mergeDBroot
mergeParent.append(instanceCoverages) # add the element to the mergedDB under root element
def parse_covergroup_type(self, covergroupCoverage, parent_query):
""" Parse covergroup instance """
for cgInstance in self.findall_ucis_children(covergroupCoverage, "cgInstance"):
cgInstNameAttrib = 'name'
cgInstName = cgInstance.get(cgInstNameAttrib)
xpath_query = parent_query + "/" + self.format_et_query("cgInstance", cgInstNameAttrib, cgInstName)
# search the same element in the resulted merged database
searchElement = self.find_merge_element_by_query(xpath_query)
if searchElement is not None:
print ("\t[cgInstance] {0}".format(cgInstName))
self.parse_coverpoints(cgInstance, xpath_query)
self.parse_crosses(cgInstance, xpath_query)
else:
print("\tFound new coverage instance [{0}]".format(cgInstName))
mergeParent = self.find_merge_element_by_query(parent_query)
mergeParent.append(cgInstance) # add the element to the covergroup
def parse_coverpoints(self, cgInstance, parent_query):
""" Parse coverpoint """
for coverpoint in self.findall_ucis_children(cgInstance, "coverpoint"):
cvpNameAttrib = 'name'
cvpName = coverpoint.get(cvpNameAttrib)
xpath_query = parent_query + "/" + self.format_et_query("coverpoint", cvpNameAttrib, cvpName)
# search the same element in the resulted merged database
searchElement = self.find_merge_element_by_query(xpath_query)
print ("\t\t[coverpoint] {0}".format(cvpName))
if searchElement is not None:
self.parse_coverpoint_bins(coverpoint, xpath_query)
else:
raise ValueError("coverpoint not present in the mergeDBtree!")
def parse_coverpoint_bins(self, coverpoint, parent_query):
""" Parse bins """
for bin in self.findall_ucis_children(coverpoint, "coverpointBin"):
binNameAttrib = 'name'
binName = bin.get(binNameAttrib)
xpath_query = parent_query + "/" + self.format_et_query("coverpointBin", binNameAttrib, binName)
binMergeElement = self.find_merge_element_by_query(xpath_query)
if binMergeElement is not None:
self.merge_bin_hits(bin, binMergeElement, xpath_query)
else:
print("\t\tFound new bin [{0}]".format(binName))
mergeParent = self.find_merge_element_by_query(parent_query)
mergeParent.append(bin) # add the bin to the covergpoint
def merge_bin_hits(self, bin, binMergeElement, parent_query):
""" Sum the bin ranges' hit counts """
# merge hits for bins which are present in both the parsed DB and mergeDBtree
for range in self.findall_ucis_children(bin, "range"):
contents = self.find_ucis_element(range, "contents")
rangeHitCount = int(contents.get('coverageCount'))
xpath_query = parent_query + "/" + self.format_et_query("range")
searchElement = self.find_merge_element_by_query(xpath_query)
if searchElement is None:
raise ValueError("Range not found! Bin contents differ between mergeDBtree and parsed XML!")
sameFrom = searchElement.get('from') == range.get('from')
sameTo = searchElement.get('to') == range.get('to')
if not (sameFrom and sameTo):
raise ValueError("Range limits differ between mergeDBtree and parsed XML!")
mergeContentsElement = self.find_ucis_element(searchElement, 'contents')
parsedContentsElement = self.find_ucis_element(range, 'contents')
totalhits = int(mergeContentsElement.get('coverageCount'))
parsedHits = int(parsedContentsElement.get('coverageCount'))
totalhits += parsedHits
# NOTE: alias attribute is set in the coverpointBin element because the
# javascript gui application uses this field for showing the number of hits!
binMergeElement.set('alias', str(totalhits))
mergeContentsElement.set('coverageCount', str(totalhits))
print ("\t\t\t[bin:{1}] {0} -> {2}".format(
bin.get('name'), bin.get('type'), totalhits))
def parse_crosses(self, cgInstance, parent_query):
for cross in self.findall_ucis_children(cgInstance, "cross"):
crossNameAttrib = 'name'
crossName = cross.get(crossNameAttrib)
xpath_query = parent_query + "/" + self.format_et_query("cross", crossNameAttrib, crossName)
mergeCrossElement = self.find_merge_element_by_query(xpath_query)
print ("\t\t[cross] {0}".format(crossName))
            if mergeCrossElement is None:
                raise ValueError("cross not present in the mergeDBtree!")
# skip processing crosses with no hits in the parse XML
if self.find_ucis_element(cross, 'crossBin') is None:
print("\t\t\tParsed cross is empty; skipping...")
continue
# the number of coverpoints crossed by this element
numCvps = len(self.findall_ucis_children(mergeCrossElement, 'crossExpr'))
""" Parse cross bins """
mergeMap = {}
# parse the mergeDBtree and store all existing cross bins and their associated hit count
# then, parse the current XML and update the map with the new information
# then, remove all the the crossBin elements from the cross
# then, create new crossBins elements matching the information stored in the map!
for crossBin in self.findall_ucis_children(mergeCrossElement, 'crossBin'):
binIndexes = []
for index in self.findall_ucis_children(crossBin, 'index'):
binIndexes.append(int(index.text))
contentsElement = self.find_ucis_element(crossBin, 'contents')
hitCount = int(contentsElement.get('coverageCount'))
if len(binIndexes) != numCvps:
raise ValueError("Found crossBin of bigger size than the number of coverpoints!")
tupleIndexes = tuple(binIndexes)
mergeMap[tupleIndexes] = hitCount
# remove crossBin
mergeCrossElement.remove(crossBin)
for crossBin in self.findall_ucis_children(cross, 'crossBin'):
binIndexes = []
for index in self.findall_ucis_children(crossBin ,'index'):
binIndexes.append(int(index.text))
contentsElement = self.find_ucis_element(crossBin, 'contents')
hitCount = int(contentsElement.get('coverageCount'))
tupleIndexes = tuple(binIndexes)
if tupleIndexes in mergeMap:
mergeMap[tupleIndexes] = mergeMap[tupleIndexes] + hitCount
else:
mergeMap[tupleIndexes] = hitCount
crossBinString = """<{0}:crossBin name="" key="0" type="default" xmlns:{0}="{1}">\n"""
for _ in range(numCvps):
crossBinString += "<{0}:index>0</{0}:index>\n"
crossBinString += """<{0}:contents coverageCount="0"></{0}:contents>\n"""
crossBinString += "</{0}:crossBin>\n"
crossBinString = crossBinString.format(self.ucis_ns, self.ns_map[self.ucis_ns])
# update crossBins element and append it to the mergeCrossElement
for indexesTuple in mergeMap:
# create new crossBin element to be added to the cross
crossBinElement = ET.fromstring(crossBinString)
print("\t\t\t" + str(indexesTuple) + " -> " + str(mergeMap[indexesTuple]))
# get a generator for the index elements contained by this crossBin;
# we will need to manually iterate through this generator when updating the indexes
indexElementGen = iter(self.findall_ucis_children(crossBinElement, 'index'))
for i in range(len(indexesTuple)):
# update index element value
indexElementValue = indexesTuple[i]
indexElement = next(indexElementGen)
indexElement.text = str(indexElementValue)
# update the contents element with the merged data
contentsElement = self.find_ucis_element(crossBinElement, 'contents')
contentsElement.set('coverageCount', str(mergeMap[indexesTuple]))
# add the contents element to the cross in the mergeDBtree
mergeCrossElement.append(crossBinElement)
# move the user attribute element to the end of the cross
userAttrElement = self.find_ucis_element(mergeCrossElement, 'userAttr')
mergeCrossElement.remove(userAttrElement)
mergeCrossElement.append(userAttrElement)
def find_xmls(directory):
for rootdir, _, files in os.walk(directory):
for fname in files:
if fnmatch(fname, '*.xml'):
filename = os.path.join(rootdir, fname)
yield filename
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='FC4SC merge tool')
parser.add_argument('--merge_to_db', type=str, help='Name of resulting merged db')
parser.add_argument('other_args', nargs=argparse.REMAINDER)
# the search top directory is by default the execution directory
args = parser.parse_args()
if args.merge_to_db:
merger = UCIS_DB_Merger()
for filename in args.other_args:
filename = filename.rstrip("\n\r")
if not merger.process_xml(filename):
print("Non-UCIS DB XML file skipped [{0}]".format(filename))
continue
merger.write_merged_db(args.merge_to_db)
        exit(0)
else:
search_top_dir = os.getcwd()
merged_db_name = "coverage_merged_db.xml"
if len(args.other_args) > 1: # if specified file path
search_top_dir = args.other_args[0]
if len(sys.argv) > 2: # if specified merged database name
merged_db_name = args.other_args[1]
merged_db_path = os.path.join(search_top_dir, merged_db_name)
# the master ucis DB which will be "merged" into when parsing additional DBs
merger = UCIS_DB_Merger()
# list of the file names that are successfully parsed and merged
filelist = []
for filename in find_xmls(search_top_dir):
# found file matches the output file; skip it
if filename == merged_db_path:
print("Warning! Input File: \n{0}\nmatches output target file => will not be parsed!".format(filename))
continue
if not merger.process_xml(filename):
print("Non-UCIS DB XML file skipped [{0}]".format(filename))
continue
filelist.append(filename)
if not filelist:
print("Error! No XML files found under " + search_top_dir)
exit(1)
merger.write_merged_db(merged_db_path)
print("Done!");
print("Searching was done recursively under directory: \n{0}\n".format(search_top_dir))
print("List of merged UCIS DB files:")
for f in filelist:
print(f)
print("\nResulted merged UCIS DB can be found at:\n" + merged_db_path) | 1.492188 | 1 |
setup.py | alella/browserhist | 1 | 12788336 | <filename>setup.py
from setuptools import setup, find_packages
setup(
name="browserhist",
version="0.4.1",
author="<NAME>",
packages=find_packages(),
install_requires=[
"elasticsearch",
"python-dateutil",
"click",
"coloredlogs"
],
entry_points={
'console_scripts': ['browserhist = browserhist.cli:cli']
}
)
| 1.429688 | 1 |
python--exercicios/ex053.py | Eliezer2000/python | 0 | 12788337 | <reponame>Eliezer2000/python<filename>python--exercicios/ex053.py
frase = str(input('Type a sentence: ')).strip().upper()
palavras = frase.split()
junto = ''.join(palavras)
print('You typed the sentence {}'.format(junto))
inverso = ''
for letra in range(len(junto) - 1, - 1, - 1):
    inverso += junto[letra]
if inverso == junto:
    print('We have a palindrome')
else:
    print('The sentence typed is not a palindrome!')
frase = str(input('Type a sentence: ')).strip().upper()
palavras = frase.split()
junto = ''.join(palavras)
inverso = ''
for letra in range(len(junto) -1, -1, -1):
    inverso += junto[letra]
if inverso == junto:
    print('We have a palindrome')
else:
    print('The sentence typed is not a palindrome')
frase = str(input('Type a sentence: ')).strip().upper()
palavras = frase.split()
junto = ''.join(palavras)
inverso = ''
for letra in range(len(junto) -1, -1, -1):
    inverso += junto[letra]
if inverso == junto:
    print('We have a palindrome')
else:
    print('The sentence typed is not a palindrome')
| 4.1875 | 4 |
utils/job_db.py | yutiansut/cilantro | 0 | 12788338 | <gh_stars>0
import os
import datetime
from pymongo import MongoClient, DESCENDING, ReturnDocument
class JobDb:
job_db_url = os.environ['JOB_DB_URL']
job_db_port = int(os.environ['JOB_DB_PORT'])
job_db_name = os.environ['JOB_DB_NAME']
first_object_id = int(os.environ['FIRST_OBJECT_ID'])
def __init__(self):
self.db = self._get_db_client()
def close(self):
self.db.client.close()
def start_db(self):
self._create_index()
self._set_first_object_id()
def generate_unique_object_identifier(self):
"""
Create the unique object identifier.
This is NOT the whole object id, just the last part to ensure every object
is unique and the last characters are digits.
:return:
"""
try:
return self.db.objects.find_one_and_update(
{'next_object_id': {'$exists': True}},
{'$inc': {'next_object_id': 1}},
return_document=ReturnDocument.AFTER
)['next_object_id']
except KeyError:
raise RuntimeError("The database doesn't seem to be initialized"
"properly")
def get_jobs_for_user(self, user):
"""
Find all jobs of the passed user in the job database.
:param str user: username to find jobs belonging to
:return: list of job objects
"""
job_list = []
for job in self.db.jobs.find({"user": user, "parent_job_id": None}, {'_id': False}):
job_list.append(
self._expand_child_information(job)
)
return job_list
def get_job_by_id(self, job_id):
"""
Find job with the given job_id.
:param str job_id: job-id to be queried
:return: job object
"""
job = self._expand_child_information(self.db.jobs.find_one(
{"job_id": job_id}, {'_id': False}))
return job
def add_job(self, job_id, user, job_type, parent_job_id, child_job_ids, parameters):
"""
Add a job to the job database.
:param str job_id: Cilantro-ID of the job
:param str user: username which started the job
:param str job_type: type of job, i.e. 'ingest_journals'
:param str parent_job_id: Cilantro-IDs of the parent job
:param list child_job_ids: Cilantro-IDs of the child jobs
:param dict parameters: Issue parameters
:return: None
"""
timestamp = datetime.datetime.now()
job = {'job_id': job_id,
'user': user,
'job_type': job_type,
'name': f"{job_type}-{job_id}",
'parent_job_id': parent_job_id,
'child_job_ids': child_job_ids,
'state': 'new',
'created': timestamp,
'started': None,
'updated': timestamp,
'parameters': parameters,
'errors': []
}
self.db.jobs.insert_one(job)
def update_job_state(self, job_id, state, error=None):
"""
Update a job to the job database with new state and updated timestamp.
If there is an error object passed then that is added to the list
of errors of that task. The errors are a list to make it
possible to keep executing the task chain even though some tasks
throw errors. The errors are put into the job entry in the database
and can be collected later.
:param str job_id: Cilantro-ID of the job
:param str state: new state of the job
        :param dict error: (optional) dict containing task name and error message
:return: None
"""
timestamp = datetime.datetime.now()
updated_values = {'state': state, 'updated': timestamp}
if state == 'started':
updated_values['started'] = timestamp
self.db.jobs.update_many({"job_id": job_id},
{'$set': updated_values})
if error:
self.db.jobs.update_many({"job_id": job_id},
{'$push': {'errors': error}})
def set_job_children(self, job_id, child_job_ids):
timestamp = datetime.datetime.now()
updated_values = {'child_job_ids': child_job_ids, 'updated': timestamp}
self.db.jobs.update_many({"job_id": job_id},
{'$set': updated_values})
def add_job_error(self, job_id, error_message):
timestamp = datetime.datetime.now()
self.db.jobs.update_many({"job_id": job_id},
{'$push': {'errors': error_message}, '$set': {'updated': timestamp}})
def _create_index(self):
"""
Create index for faster lookup in database.
The 2 fields that are used for lookup/update are indexed.
"""
self.db.jobs.create_index([("job_id", DESCENDING), ("user", DESCENDING)])
def _set_first_object_id(self):
if not self.db.objects.find_one({'next_object_id': {'$exists': True}}):
self.db.objects.insert_one({'next_object_id': self.first_object_id})
def _get_db_client(self):
return MongoClient(self.job_db_url, self.job_db_port)[self.job_db_name]
def _expand_child_information(self, job):
"""
Expand child job information for parent job
:param job: Parent job to be expanded
:return: job object
"""
if 'child_job_ids' in job:
children_with_status = []
for child_id in job['child_job_ids']:
child = self.db.jobs.find_one({'job_id': child_id}, {'_id': False})
children_with_status += [{'job_id': child_id,
'state': child['state'],
'type': child['job_type']}]
del job['child_job_ids']
job['children'] = children_with_status
if 'parent_job_id' in job and job['parent_job_id'] is None:
del job['parent_job_id']
return job
| 2.875 | 3 |
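# Editor's sketch (added): typical lifecycle, assuming the JOB_DB_* and FIRST_OBJECT_ID
# environment variables point at a reachable MongoDB instance.
# db = JobDb()
# db.start_db()
# db.add_job(job_id='job-1', user='alice', job_type='ingest_journals',
#            parent_job_id=None, child_job_ids=[], parameters={})
# db.update_job_state('job-1', 'started')
# print(db.get_jobs_for_user('alice'))
# db.close()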
reference/Python/lang/*and**_in_function.py | steadylearner/code | 4 | 12788339 | def print_some(a, b):
print(a, b)
list_of_numbers = [1, 2]
print_some(*list_of_numbers)
# a = {'a': "one", 'b': "two", 'c': "three" }]
# print_some(*a) key
# print_some(**a) value
| 3.578125 | 4 |
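# Editor's example (added): with a dict whose keys match the parameter names, ** passes
# them as keyword arguments, while * on a dict yields only its keys.
kwargs = {'a': 1, 'b': 2}
print_some(*kwargs)   # prints the keys: a b
print_some(**kwargs)  # prints the values bound by name: 1 2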
src/genie/libs/parser/iosxe/tests/ShowPlatformHardwareQfpActiveFeatureAppqoe/cli/equal/175_golden_output_expected.py | balmasea/genieparser | 0 | 12788340 | expected_output = {
"feature":{
"appqoe":{
"global":{
"ip_non_tcp_pkts":9485,
"not_enabled":0,
"cft_handle_pkt":0,
"sdvt_divert_req_fail":41,
"sn_data_pkts_processed":0,
"sdvt_global_stats":{
"within_sdvt_syn_policer_limit":1562354
}
},
"sn_index":{
"0 (Green)":{
"ip":"192.168.2.2",
"oce_id":3367962768,
"del":0,
"key":"0x0301",
"id":1,
"ver":1,
"status":1,
"type":3,
"sng":0,
"appnav_stats":{
"to_sn":{
"packets":260453790,
"bytes":164529550955
},
"from_sn":{
"packets":324977684,
"bytes":290245930396
}
},
"sdvt_count_stats":{
"active_connections":3539,
"decaps":323418847,
"encaps":260453790,
"expired_connections":1558815,
"decap_messages":{
"processed_control_messages":1558837,
"delete_requests_recieved":1558837,
"deleted_protocol_decision":1558837
}
},
"sdvt_packet_stats":{
"divert":{
"packets":260453790,
"bytes":152027769035
},
"reinject":{
"packets":323418342,
"bytes":264476829284
}
},
"sdvt_drop_cause_stats":{
},
"sdvt_errors_stats":{
}
},
"Default":{
"sdvt_count_stats":{
"packets_unmarked_in_ingress":41
},
"sdvt_packet_stats":{
},
"sdvt_drop_cause_stats":{
},
"sdvt_errors_stats":{
}
}
}
}
}
}
| 1.25 | 1 |
pyExSi/__init__.py | ladisk/pyExSi | 3 | 12788341 | __version__ = '0.42'
from .signals import *
| 1.054688 | 1 |
playwright/event_context_manager.py | tumregels/playwright-python | 2 | 12788342 | <reponame>tumregels/playwright-python
# Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from typing import Any, Coroutine, Generic, Optional, TypeVar, cast
T = TypeVar("T")
class EventInfoImpl(Generic[T]):
def __init__(self, coroutine: Coroutine) -> None:
self._value: Optional[T] = None
self._task = asyncio.get_event_loop().create_task(coroutine)
self._done = False
@property
async def value(self) -> T:
if not self._done:
self._value = await self._task
self._done = True
return cast(T, self._value)
class EventContextManagerImpl(Generic[T]):
def __init__(self, coroutine: Coroutine) -> None:
self._event: EventInfoImpl = EventInfoImpl(coroutine)
async def __aenter__(self) -> EventInfoImpl[T]:
return self._event
async def __aexit__(self, *args: Any) -> None:
await self._event.value
| 2.328125 | 2 |
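# Editor's sketch (added): intended usage pattern, assuming `listen_for_event()` returns
# a coroutine that resolves once the awaited event arrives and `trigger_event()` is the
# action that fires it (both names are hypothetical).
# async def example() -> None:
#     async with EventContextManagerImpl[str](listen_for_event()) as event_info:
#         await trigger_event()
#     print(await event_info.value)  # the awaited result, resolved on exit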
keras_based/exchange/multi_ex_ui.py | TianyuDu/AnnEconForecast | 7 | 12788343 | """
Multivariate version of exchange rate prediction.
"""
import os
os.system("clear")
import sys
sys.path.append("./core/containers/")
sys.path.append("./core/models/")
sys.path.append("./core/tools/")
import datetime
import keras
import pandas as pd
import numpy as np
import matplotlib
# TODO: add auto-detect
# for mac OS: os.name == "posix" and sys.platform == "darwin"
# Use this identifier to automatically decide the following.
on_server = bool(int(input("Are you on a server without graphic output? [0/1] >>> ")))
if on_server:
matplotlib.use(
"agg",
warn=False,
force=True
)
from matplotlib import pyplot as plt
import sklearn
from bokeh.plotting import figure
from bokeh.layouts import row, column
from bokeh.models import HoverTool
from bokeh.io import show, output_file
from typing import Union, List
# import config
# import methods
# from methods import *
# from models import *
from multi_config import *
from multivariate_container import MultivariateContainer
from multivariate_lstm import MultivariateLSTM
from bokeh_visualize import advanced_visualize as bvis
def train_new_model():
"""
Train a new model.
"""
print(f"Control: Building new container from {file_dir}...")
print(f"\tTarget is {target}")
# Build up containers.
container = MultivariateContainer(
file_dir,
target,
load_multi_ex,
CON_config)
print(chr(9608))
print("Control: Building up models...")
model = MultivariateLSTM(container, NN_config)
print(chr(9608))
model.fit_model(epochs=int(input("Training epochs >>> ")))
save_destination = input("Folder name to save model? [Enter] Using default >>> ")
print("Control: Saving model training result...")
if save_destination == "":
model.save_model()
else:
model.save_model(file_dir=save_destination)
print(chr(9608))
def visualize_training_result():
print(f"Contro;: Building up container from {file_dir}...")
container = MultivariateContainer(
file_dir,
target,
load_multi_ex,
CON_config)
print(chr(9608))
print("Control: Building empty model...")
model = MultivariateLSTM(container, NN_config, create_empty=True)
print(chr(9608))
load_target = input("Model folder name >>> ")
load_target = f"./saved_models/{load_target}/"
print(f"Control: Loading model from {load_target}...")
model.load_model(
folder_dir=load_target
)
print(chr(9608))
# Forecast testing set.
yhat = model.predict(model.container.test_X)
yhat = model.container.invert_difference(
yhat,
range(
model.container.num_obs-len(yhat),
model.container.num_obs
),
fillnone=True
)
# Forecast trainign set.
train_yhat = model.predict(model.container.train_X)
train_yhat = model.container.invert_difference(
train_yhat, range(len(train_yhat)), fillnone=True
)
# Visualize
plt.close()
plt.plot(yhat, linewidth=0.6, alpha=0.6, label="Test set yhat")
plt.plot(train_yhat, linewidth=0.6, alpha=0.6, label="Train set yhat")
plt.plot(model.container.ground_truth_y, linewidth=1.2, alpha=0.3, label="actual")
plt.legend()
action = input("Plot result? \n\t[P] plot result. \n\t[S] save result. \n\t>>>")
assert action.lower() in ["p", "s"], "Invalid command."
if action.lower() == "p":
plt.show()
elif action.lower() == "s":
fig_name = str(datetime.datetime.now())
plt.savefig(f"./figure/{fig_name}.svg")
print(f"Control: figure saved to ./figure/{fig_name}.svg")
if __name__ == "__main__":
print("""
=====================================================================
Hey, you are using the Multivariate Exchange Rate Forecasting Model
This is a neural network developed to forecast economic indicators
The model is based on Keras
@Spikey
Version. 0.0.1, Sep. 11 2018
Important files
Configuration file: ./multi_config.py
Model definition file: ./models.py
""")
task = input("""
What to do?
[N] Train new model.
[R] Restore saved model and continue training.
[V] Visualize training result using matplotlib.
[B] Visualize training result using bokeh.
[Q] Quit.
>>> """)
assert task.lower() in ["n", "r", "v", "q", "b"], "Invalid task."
if task.lower() == "n":
train_new_model()
elif task.lower() == "r":
raise NotImplementedError
elif task.lower() == "v":
visualize_training_result()
elif task.lower() == "b":
bvis(
file_dir=file_dir,
target=target,
load_multi_ex=load_multi_ex,
CON_config=CON_config,
NN_config=NN_config
)
elif task.lower() == "q":
quit()
| 2.609375 | 3 |
examples/BioASQ/extra_modules/bioasq/KMeansOrderer.py | paritoshgpt1/BOOM | 29 | 12788344 | <reponame>paritoshgpt1/BOOM
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from .SentenceOrderer import SentenceOrderer
import re, sys
import numpy as np
from scipy.spatial.distance import cosine  # cosine() used below is assumed to be SciPy's cosine distance
class KMeansOrderer(SentenceOrderer):
def __init__(self):
pass
def setup(self, sentences, snippets=None, k=2, max_iter=10, max_tokens=300):
self.k = k
self.max_iter = max_iter
self.max_tokens = max_tokens
self.uniq_sentences = list(set(sentences))
self.snippets = snippets
self.sentences = []
self.word_length = 0
self.stopwords = set(stopwords.words('english'))
for sentence in self.uniq_sentences:
if self.word_length + len(word_tokenize(sentence)) <= self.max_tokens and len(sentence) > 3:
self.sentences.append(sentence)
self.word_length += len(word_tokenize(sentence))
self.vocab_size, self.indices = self.index()
def index(self):
word_count = 0
word2idx = {}
for sentence in self.sentences:
for word in sentence.split(' '):
token = re.sub(r'\W+', '', word).lower()
if token not in self.stopwords and token != '':
if token not in word2idx:
word2idx[token] = word_count
word_count += 1
return word_count, word2idx
def vectorize(self):
vecs = []
for sentence in self.sentences:
arr = np.zeros(self.vocab_size, dtype=np.int8)
for word in sentence.split(' '):
token = re.sub(r'\W+', '', word).lower()
if token not in self.stopwords and token != '':
arr[self.indices[token]] += 1
vecs.append(arr)
return vecs
def kmeans(self, vecs):
pairs = list(zip(vecs, range(0, len(vecs))))
centroid_all = np.mean(vecs, axis=0)
centroids = vecs[-self.k:]
for i in range(0, self.max_iter):
clusters = []
for j in range(0, self.k):
clusters.append([])
for pair in pairs:
min_dist = 2.0
min_centroid = -1
for j in range(0, len(centroids)):
                    dist = cosine(pair[0], centroids[j])  # cosine distance; np.cos (element-wise trig) was a bug here
if dist < min_dist:
min_dist = dist
min_centroid = j
clusters[min_centroid].append(pair)
for j in range(0, self.k):
arr = list(map(lambda x: x[0], clusters[j]))
centroids[j] = np.mean(arr, axis=0)
ordered_sentences = []
ordered_clusters = sorted(clusters, key=lambda x: cosine(centroid_all, np.mean(list(map(lambda y: y[0], x)), axis=0)))
for i in range(0, len(ordered_clusters)):
for j in sorted(ordered_clusters[i], key=lambda x: cosine(x[0], centroids[i])):
ordered_sentences.append(j)
return ordered_sentences
def toText(self, clusters):
result = []
for c in clusters:
result.append(self.sentences[c[1]])
return result
def orderSentences(self, sentences, snippets=None, k=2, info_dict={'k':2,'max_iter':10,'max_tokens':300}):
self.setup(sentences, snippets, k, info_dict['max_iter'],
info_dict['max_tokens'])
if len(self.sentences) < 2:
return self.tileSentences(self.sentences)
vecs = self.vectorize()
clusters = self.kmeans(vecs)
return self.toText(clusters)
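
# Illustrative usage sketch (not part of the original module): the sentences below
# are made-up examples and the call relies only on the defaults of orderSentences().
# It assumes the NLTK stopword/tokenizer data are installed; run as a module
# (e.g. `python -m ...KMeansOrderer`) so the relative import above resolves.
if __name__ == "__main__":
    demo_sentences = [
        "Protein A interacts with protein B in yeast cells.",
        "The interaction was confirmed by co-immunoprecipitation experiments.",
        "Protein A is also highly expressed in human tissue samples.",
    ]
    orderer = KMeansOrderer()
    for sentence in orderer.orderSentences(demo_sentences):
        print(sentence)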
| 2.59375 | 3 |
scripts/lexicons.py | opener-project/opinion_miner_deluxe | 1 | 12788345 | <reponame>opener-project/opinion_miner_deluxe
#!/usr/bin/env python
import sys
import os
import csv
from subprocess import Popen,PIPE
def create_lexicons(path_to_script, training_file,exp_filename, tar_filename):
cmd = ['python']
cmd.append(path_to_script)
cmd.append('-exp_csv')
cmd.append(exp_filename)
cmd.append('-tar_csv')
cmd.append(tar_filename)
cmd.append('-l')
cmd.append(training_file)
folder = os.path.dirname(exp_filename)
log_out = open(os.path.join(folder,'log.out'),'wb')
log_err = open(os.path.join(folder,'log.err'),'wb')
lexicon_generator = Popen(' '.join(cmd),stdout=log_out, stderr=log_err, shell=True)
ret_code = lexicon_generator.wait()
log_out.close()
log_err.close()
print>>sys.stderr,' Lexicons created, on',folder,' ret code:',ret_code
def load_lexicon(lexicon_filename):
### LEXICON FROM THE DOMAIN
fd = open(lexicon_filename,'rb')
##dialect = csv.Sniffer().sniff(fd.read(1024))
##fd.seek(0)
#lex_reader = csv.reader(fd,dialect)
lex_reader = csv.reader(fd,delimiter=';')
my_lexicon = {}
for n,row in enumerate(lex_reader):
if n != 0:
text_type,ratio,rel_freq,over_freq,lemmas,postags,freqwords = row
this_pos = text_type.rfind('#')
text = text_type[:this_pos]
my_type = text_type[this_pos+1:]
my_lexicon[text.decode('utf-8')] = my_type.decode('utf-8')
return my_lexicon
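
# Illustrative usage sketch (not part of the original script). The script path,
# training corpus and CSV file names below are hypothetical placeholders.
if __name__ == '__main__':
    create_lexicons('extract_lexicons.py', 'training_corpus.txt',
                    'expressions.csv', 'targets.csv')
    expression_lexicon = load_lexicon('expressions.csv')
    print>>sys.stderr, 'Loaded', len(expression_lexicon), 'expression entries'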
| 2.109375 | 2 |
tests/examples/test_pandas_operations.py | BBVA/python-etl | 20 | 12788346 | <reponame>BBVA/python-etl
# Copyright 2017 BBVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Example of integration with pandas
import pandas as pd
from datarefinery.TupleOperations import fusion, wrap
def pandas_dataframe_operator(df, operation):
    if df is None or operation is None:
        return None, None
    # Apply the refinery operation row-wise; each result is an (ok, ko) pair.
    raw_df = pd.DataFrame(df.apply(operation, axis=1), columns=["res"])
    raw_df['ok'] = raw_df["res"].apply(lambda x: x[0])
    raw_df['ko'] = raw_df["res"].apply(lambda x: x[1])
    raw_df = pd.concat([df, raw_df[['ok', 'ko']]], axis=1)
    # Rows whose 'ok' part is not None were processed successfully.
    ok_filter = raw_df["ok"].apply(lambda x: x is not None)
    ok_df = raw_df[ok_filter]
    ok_df = pd.DataFrame(ok_df["ok"].apply(pd.Series))
    # Failed rows keep their original columns plus the expanded error mapping.
    ko_df = raw_df[~ok_filter]
    ko_df = pd.concat([ko_df, pd.DataFrame(ko_df["ko"].apply(pd.Series))], axis=1)
    del ko_df['ok']
    del ko_df['ko']
    return ok_df, ko_df
def test_empty():
(res, err) = pandas_dataframe_operator(None, None)
assert res is None
assert err is None
def test_some():
greet = ["hello", "hi", "greetings"]
people = ["Tom", "Alex", "Unihorn"]
df = pd.DataFrame.from_dict({'greet': greet, 'people': people})
operation = fusion(["greet", "people"], "salute", wrap(lambda x: x[0] + " " + x[1]))
(res, err) = pandas_dataframe_operator(df, operation)
assert res is not None
assert isinstance(res, pd.DataFrame)
assert err is not None
assert isinstance(err, pd.DataFrame)
assert res['salute'].tolist() == ['hello Tom', 'hi Alex', 'greetings Unihorn']
def test_some_error():
greet = ["hello", "hi", "greetings"]
people = ["Tom", "Alex", "Unihorn"]
df = pd.DataFrame.from_dict({'greet': greet, 'people': people})
operation = fusion(["gredo", "people"], "salute", wrap(lambda x: x[0] + " " + x[1]))
(res, err) = pandas_dataframe_operator(df, operation)
assert res is not None
assert isinstance(res, pd.DataFrame)
assert err is not None
assert isinstance(err, pd.DataFrame)
assert err['salute'].tolist() == ['gredo not found', 'gredo not found', 'gredo not found']
| 2.515625 | 3 |
fmojinja/chpi/__main__.py | Taro-Imahiro/fmojinja | 0 | 12788347 | <reponame>Taro-Imahiro/fmojinja<filename>fmojinja/chpi/__main__.py<gh_stars>0
from ..mixin import SubCommands
from .vpi import Vpi
from .input import Input
SubCommands.main_proc({
"vpi": Vpi,
"input": Input,
})
| 1.34375 | 1 |
spider/douban.py | yhfyhf/wang_fm | 30 | 12788348 | <reponame>yhfyhf/wang_fm
#!/usr/bin/env python
# encoding: utf-8
'''
Ref:
https://github.com/zonyitoo/doubanfm-qt/wiki/%E8%B1%86%E7%93%A3FM-API
'''
import json
import random
import traceback
import requests
import requests.exceptions
from model.channel import get_channel, add_channel, update_channel
from model.music import get_music, add_music
from config import DOUBAN_USER_NAME, DOUBAN_USER_PASSWORD
DOUBAN_SPIDER_NAME = 'radio_desktop_win'
DOUBAN_SPIDER_VERSION = '100'
DOUBAN_CHANNEL_UUID_FORMAT = 'douban-%d' # % (channel_id)
DOUBAN_MUSIC_UUID_FORMAT = 'douban-%d-%d' # % (aid, sid)
_user_id = None
_token = None
_expire = None
def login():
payload = {'app_name': DOUBAN_SPIDER_NAME,
'version': DOUBAN_SPIDER_VERSION,
'email': DOUBAN_USER_NAME,
               'password': DOUBAN_USER_PASSWORD}
try:
r = requests.post("http://www.douban.com/j/app/login", data=payload)
except requests.exceptions.ConnectionError:
return False
except requests.exceptions.Timeout:
return False
r = json.loads(r.text)
if r['r'] != 0:
print 'spider.douban.login: failed. r=', r
return False
global _user_id, _token, _expire
_user_id = r['user_id']
_token = r['token']
_expire = r['expire']
return True
def update_channel_list():
r = requests.get("http://www.douban.com/j/app/radio/channels")
r = json.loads(r.text)
channel_list = []
assert 'channels' in r
for channel in r['channels']:
cid = int(channel['channel_id'])
uuid = DOUBAN_CHANNEL_UUID_FORMAT % (cid)
if cid != 0 and len(get_channel(uuid=uuid)) == 0:
# not private list and not in db
new_channel = add_channel(channel['name'], uuid)
channel_list.append(new_channel)
return channel_list
def _update_channel_once(channel, max_num=10):
'''update music in channel. max is the max number it will update
return updated music
please login before this function'''
global _user_id, _token, _expire
# TODO
# maybe need a better method to assert and get cid
assert channel.uuid.startswith(DOUBAN_CHANNEL_UUID_FORMAT.split('-')[0])
cid = int(channel.uuid.split('-')[1])
if channel.music_list == []:
payload = {'app_name': DOUBAN_SPIDER_NAME,
'version': DOUBAN_SPIDER_VERSION,
'user_id': _user_id,
'expire': _expire,
'token': _token,
'channel': cid,
'type': 'n'}
else:
uuid = get_music(key=random.choice(channel.music_list))[0].uuid
sid = uuid.split('-')[2]
payload = {'app_name': DOUBAN_SPIDER_NAME,
'version': DOUBAN_SPIDER_VERSION,
'user_id': _user_id,
'expire': _expire,
'token': _token,
'channel': cid,
'type': 'p',
'sid': sid}
# # mark as listened
# mark_payload = {'app_name': DOUBAN_SPIDER_NAME,
# 'version': DOUBAN_SPIDER_VERSION,
# 'user_id': _user_id,
# 'expire': _expire,
# 'token': _token,
# 'channel': cid,
# 'type': 'e',
# 'sid': sid}
# try:
# requests.get("http://www.douban.com/j/app/radio/people", params=mark_payload, timeout=5)
# except:
# pass
# # don't play again
# mark_payload = {'app_name': DOUBAN_SPIDER_NAME,
# 'version': DOUBAN_SPIDER_VERSION,
# 'user_id': _user_id,
# 'expire': _expire,
# 'token': _token,
# 'channel': cid,
# 'type': 'b',
# 'sid': sid}
# try:
# requests.get("http://www.douban.com/j/app/radio/people", params=mark_payload, timeout=5)
# except:
# pass
try:
print 'getting list'
r = requests.get("http://www.douban.com/j/app/radio/people", params=payload, timeout=5)
except requests.exceptions.ConnectionError:
traceback.print_exc()
return []
except requests.exceptions.Timeout:
traceback.print_exc()
return []
r = json.loads(r.text)
assert r['r'] == 0
update_music = []
#channel_music_list = channel.music_list
for song in r['song']:
try:
uuid = DOUBAN_MUSIC_UUID_FORMAT % (int(song['aid']), int(song['sid']))
except:
# ads
continue
print uuid
music = None
if len(get_music(uuid=uuid)) == 0:
try:
print 'getting song'
cover_fd = requests.get(song['picture'], stream=True, timeout=5).raw
audio_fd = requests.get(song['url'], stream=True, timeout=5).raw
except requests.exceptions.ConnectionError:
traceback.print_exc()
continue
except requests.exceptions.Timeout:
traceback.print_exc()
continue
music = add_music(song['title'], song['artist'], song['albumtitle'],
song['company'], song['public_time'], song['kbps'],
cover_fd, audio_fd, uuid)
else:
music = get_music(uuid=uuid)[0]
if music and music.key not in channel.music_list:
channel_music_list = channel.music_list
channel_music_list.append(music.key)
update_channel(channel, music_list=channel_music_list)
update_music.append(music)
if len(update_music) >= max_num:
break
return update_music
def update_music_by_channel(channel, num):
'''update the music in channel, music count is num'''
updated_music = []
retry = 0
while num > 0:
music_list = _update_channel_once(channel, num)
updated_music.extend(music_list)
num -= len(music_list)
if music_list == []:
retry += 1
if retry > 5:
break
return updated_music
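
# Illustrative call-order sketch (not part of the original spider): log in first,
# then register any new channels, then pull a handful of tracks per channel.
# The count of 5 tracks is an arbitrary example value.
if __name__ == '__main__':
    if login():
        for new_channel in update_channel_list():
            update_music_by_channel(new_channel, 5)
    else:
        print 'douban login failed'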
| 2.3125 | 2 |
books/grokking_deeplearning/ch3/numpy_dables.py | gerritjvv/deeplearning | 0 | 12788349 | import numpy as np
a = np.array([
[1, 2, 3],
[4, 5, 6]
])
print("print(a)")
print(a)
print()
print("print(a.T)")
print(a.T)
print()
print("print(a.dot(2))")
print(a.dot(2))
print()
print("print(a.dot(np.array([2, 2, 2])))")
print(a.dot(np.array([2, 2, 2])))
print()
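# Additional example (not in the original file): dotting the 2x3 matrix with its
# 3x2 transpose gives a 2x2 matrix of pairwise row dot products.
print("print(a.dot(a.T))")
print(a.dot(a.T))
print()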
| 3.390625 | 3 |
main.py | glombard/python-plugin-experiment | 0 | 12788350 | """
Sample Python 3.5 application that has plugin support.
It dynamically loads plugins from the 'plugins' directory.
Two types of plugins are supported: commands and hooks.
A command is executed if it matches a cmdline argument.
A hook is executed before and after each command...
Example usage:
main.py print upper print lower print
"""
import sys
import logging
from app.args import get_args
from app.processor import process_commands
def main(argv):
print('My Plugin Demo')
logging.basicConfig(level=logging.DEBUG)
logging.debug("Starting")
args = get_args(argv)
input_obj = "HeLLo WOrLD!" # TODO: this could perhaps be stdin...
process_commands(input_obj, args.commands)
if __name__ == '__main__':
sys.exit(main(sys.argv))
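
# Hypothetical sketch of a command plugin (not from the original project): the
# plugin contract enforced by app.processor is not visible in this file, so the
# module name and the `execute` function below are assumptions about how a
# command such as `upper` might live in the plugins/ directory.
#
#   # plugins/upper.py
#   def execute(input_obj):
#       """Return the command's input upper-cased."""
#       return input_obj.upper()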
| 3.390625 | 3 |