max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---
gmdc_import.py | djalex88/blender-gmdc | 1 | 12795751 | <reponame>djalex88/blender-gmdc<gh_stars>1-10
#!BPY
"""
Name: 'GMDC (.gmdc, .5gd)'
Blender: 249
Group: 'Import'
Tooltip: 'Import TS2 GMDC file' """
#-------------------------------------------------------------------------------
# Copyright (C) 2016 DjAlex88 (https://github.com/djalex88/)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from gmdc_tools import *
from itertools import chain
import bpy, Blender
from Blender import Draw
from Blender.Mathutils import Vector as BlenderVector
########################################
## Importer
########################################
def create_objects(geometry, transform_tree, settings):
#---------------------------------------
# subroutines
def create_mesh(name, V, I, T1, T2):
# create mesh
#
mesh = Blender.Mesh.New(name)
mesh.verts.extend(V)
mesh.faces.extend(I, ignoreDups=True, smooth=True)
# since Blender recalculates normals, setting original normals is useless
# instead, calculate normals
mesh.calcNormals()
if T1:
mesh.addUVLayer('UVMap')
# assign texture coords
#
for f, t in zip(mesh.faces, T1):
f.uv = tuple(BlenderVector(u, 1-v) for u, v in t) # Direct3D -> OpenGL
if T2:
mesh.addUVLayer('UVMap2')
mesh.activeUVLayer = 'UVMap2'
for f, t in zip(mesh.faces, T2):
f.uv = tuple(BlenderVector(u, 1-v) for u, v in t)
mesh.activeUVLayer = 'UVMap'
return mesh
def add_bones_to_armature(transform_nodes, parent_bone=None):
for node in transform_nodes:
if id(node) in node_ids:
_bone = Blender.Armature.Editbone()
_bone.head = BlenderVector(node.abs_transform.loc.to_tuple())
# compute tail pos as arithmetic mean
v = [_n.abs_transform.loc for _n in node.child_nodes if (id(_n) in node_ids and _n.bone_index in bone_set)]
v = sum(v, Vector())*(1./len(v)) if (v and node.bone_index in bone_set) else node.abs_transform.loc
# the bone's length must not be 0, otherwise Blender ignores it
if (node.abs_transform.loc-v).len() < 0.025:
v = node.abs_transform.loc + node.abs_transform.rot.get_matrix().col(2)*0.05
_bone.tail = BlenderVector(v.to_tuple())
if parent_bone: _bone.parent = parent_bone
name = make_unique_bone_name(node.name, node.bone_index, armature.bones.keys())
# add bone and its children
armature.bones[name] = _bone
add_bones_to_armature(node.child_nodes, _bone)
##
## armature, node_ids and bone_set are defined at the bottom
def make_unique_bone_name(name, idx, collection):
idx = '#%i'%idx if idx!=None else ''
s = name[:30-len(idx)] + idx # max - 31 characters (?)
i = 1
while s in collection:
s = '.%i'%i + idx
s = name[:30-len(s)] + s
i+= 1
return s
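# For illustration: with an empty collection, make_unique_bone_name('spine', 3, [])
# yields 'spine#3'; if that name is already taken, the next candidates are
# 'spine.1#3', 'spine.2#3', ..., each truncated to stay within Blender's bone-name
# length limit.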
#---------------------------------------
# get active scene
scene = bpy.data.scenes.active
#
# add mesh objects (main geometry)
#
mesh_objects = []
for group in geometry.index_groups:
log( 'Index group "%s"' % group.name )
data_group = geometry.data_groups[group.data_group_index]
# define index mapping
S = {} # { old_index -> new_index }
for i, x in enumerate(sorted(set(chain(*group.indices)))): S[x] = i
# map indices
I = [(S[i], S[j], S[k]) for i, j, k in group.indices]
# filtering function
def select_data(data):
return [x for i, x in enumerate(data) if i in S]
V = select_data(data_group.vertices)
# texture coords
if data_group.tex_coords:
T1 = select_data(data_group.tex_coords)
T1 = [(T1[i], T1[j], T1[k]) for i, j, k in I]
if data_group.tex_coords2:
T2 = select_data(data_group.tex_coords2)
T2 = [(T2[i], T2[j], T2[k]) for i, j, k in I]
else:
T2 = None
else:
T1 = group.tex_coords and group.tex_coords[:] # copy or None
T2 = group.tex_coords2 and group.tex_coords2[:]
# also, Blender does not like triangles with zero-index vertex on 3rd position
# as well as degenerate triangles (i.e., less than 3 different indices):
# https://www.blender.org/api/249PythonDoc/Mesh.MFaceSeq-class.html#extend
#
w = []
for i, t in enumerate(I):
if 0 == t[2]:
I[i] = (t[2], t[0], t[1])
log( '--Triangle # %i reordered:' % i, t, '->', I[i] )
if T1:
uv1, uv2, uv3 = T1[i]
T1[i] = (uv3, uv1, uv2)
if T2:
uv1, uv2, uv3 = T2[i]
T2[i] = (uv3, uv1, uv2)
if len(set(t)) < 3:
w.append(i)
log( '--Triangle # %i' % i, t, 'removed' )
for i in reversed(w):
del I[i]
if T1:
del T1[i]
if T2:
del T2[i]
w = None
log( '--Creating mesh object (vertices: %i, triangles: %i)...' % (len(V), len(I)) )
# create mesh and add it to the scene
mesh = create_mesh(group.name, V, I, T1, T2)
obj = scene.objects.new(mesh)
obj.name = group.name # max - 21 characters
# save original name and flags
assert type(group.name) == str
obj.addProperty('name', group.name) # Blender does not like Unicode here
obj.addProperty('flags', '%08X' % group.flags)
mesh_objects.append(obj) # save reference to current object
log( '--Rigging:', data_group.bones and 'yes' or 'no' )
# rigging
#
if data_group.bones:
B = select_data(data_group.bones)
W = select_data(data_group.weights)
log( '--Assigning vertices to vertex groups...' )
# map bones
B = [tuple(group.bones[j] for j in b) for b in B]
dd = dict() # { index -> unique_bone_name }
for idx in group.bones:
name = transform_tree and transform_tree.get_node(idx).name or 'bone'
dd[idx] = name = make_unique_bone_name(name, idx, dd.values())
# add vertex group
mesh.addVertGroup(name)
v_group_names = [dd.get(j) for j in xrange(max(dd)+1)]
# assign vertices
for i, (b, w) in enumerate(zip(B, W)):
for wi, j in enumerate(b):
if wi == 3:
f = 1.0 - sum(w)
else:
f = w[wi]
mesh.assignVertsToGroup(v_group_names[j], [i], f, 1) # 1 - Blender.Mesh.AssignModes.REPLACE
v_group_names = dd = None
# shape keys
#
if data_group.keys:
log( '--Adding shape keys...' )
keys = select_data(data_group.keys)
dV = map(select_data, data_group.dVerts)
log( '\x20\x20--Length of dV: (%i, %i, %i, %i)' % tuple(map(len, dV)) )
# basis
obj.insertShapeKey()
for idx, s in enumerate(geometry.morph_names):
_keys_f = filter(lambda t: idx in t[1], enumerate(keys))
if _keys_f:
s = '::'.join(s)
log( '\x20\x20--Key "%s"' % s )
obj.insertShapeKey()
mesh.key.blocks[-1].name = s # set name
block_verts = mesh.key.blocks[-1].data
# modify mesh with dV
#
for i, key in _keys_f:
j = key.index(idx)
v = dV[j]
if v:
block_verts[i]+= BlenderVector(*v[i])
obj.activeShape = 1 # return to basis
#<- groups
#
# add bounding geometry
#
if settings['import_bmesh']:
if geometry.static_bmesh:
log( 'Creating static bounding mesh...' )
V, I = geometry.static_bmesh
mesh = Blender.Mesh.New('b_mesh')
mesh.verts.extend(V)
mesh.faces.extend(I)
obj = scene.objects.new(mesh)
obj.name = 'b_mesh'
if geometry.dynamic_bmesh:
log( 'Creating dynamic bounding mesh...' )
mesh = Blender.Mesh.New('b_mesh')
obj = scene.objects.new(mesh)
obj.name = 'b_mesh'
v_group_names = set()
for idx, part in enumerate(geometry.dynamic_bmesh):
if part:
V, I = part
S = {} # { old_index -> new_index }
j = len(mesh.verts)
for i, x in enumerate(sorted(set(chain(*I)))): S[x] = i+j
rot, loc = geometry.inverse_transforms[idx]
t = Transform(loc, rot).get_inverse()
V = [t.transformPoint(Vector(*x)).to_tuple() for i, x in enumerate(V) if i in S]
I = [(S[i], S[j], S[k]) for i, j, k in I]
mesh.verts.extend(V)
mesh.faces.extend(I)
name = transform_tree and transform_tree.get_node(idx).name or 'bone'
name = make_unique_bone_name(name, idx, v_group_names)
v_group_names.add(name)
mesh.addVertGroup(name)
mesh.assignVertsToGroup(name, S.values(), 1.0, 1)
mesh.calcNormals()
v_group_names = None
mesh_objects.append(obj)
#
# load inverse transforms (if any)
#
if geometry.inverse_transforms:
v = tuple(chain(*chain(*geometry.inverse_transforms)))
try:
w = tuple(scene.properties['gmdc_inverse_transforms'])
log( 'Scene already has inverse transforms (%i) stored in scene.properties["gmdc_inverse_transforms"]' % (len(w)/7) )
if v != w and display_menu('The file has a different set of inverse transforms. Replace?',
['Yes, replace inverse transforms.', 'No, keep previously loaded inverse transforms.'], choice_required=True) == 0:
raise Exception()
except:
log( 'Saving inverse transforms in scene.properties["gmdc_inverse_transforms"]' )
scene.properties['gmdc_inverse_transforms'] = v
#
# add armature (if any)
#
if transform_tree:
bone_set = set(chain(*(group.bones or [] for group in geometry.index_groups)))
if settings['all_bones']:
node_ids = set(map(id, transform_tree))
else:
node_ids = set()
for j in bone_set:
node = transform_tree.get_node(j)
assert not isinstance(node, tuple)
# include all nodes down to root
while node and id(node) not in node_ids:
node_ids.add(id(node))
node = node.parent
if node_ids:
log( 'Creating armature...' )
log( '--Number of transform nodes (%i)' % len(node_ids) )
armature = Blender.Armature.New()
armature.envelopes = False
armature.vertexGroups = True
armature.drawType = Blender.Armature.STICK
arm_obj = scene.objects.new(armature) # create armature object
arm_obj.drawMode |= Blender.Object.DrawModes.XRAY
# add bones
armature.makeEditable()
add_bones_to_armature(transform_tree.root_nodes)
armature.update()
log( '--Adding armature modifier(s)...' )
# assign armature modifier
#
for obj in mesh_objects:
modifier = obj.modifiers.append(Blender.Modifier.Types.ARMATURE)
modifier[Blender.Modifier.Settings.VGROUPS ] = True # use vertex groups
modifier[Blender.Modifier.Settings.ENVELOPES] = False # not envelopes
modifier[Blender.Modifier.Settings.OBJECT ] = arm_obj
scene.update()
#<- end
def begin_import():
settings = {
'import_bmesh': btn_import_bmesh.val,
'remove_doubles': btn_remove_doubles.val,
'all_bones': btn_all_bones.val,
}
_save_log = bool(btn_save_log.val)
gmdc_filename = str_gmdc_filename.val.strip()
cres_filename = str_cres_filename.val.strip()
if not gmdc_filename:
display_menu('Error!', ['Select GMDC file.'])
return
# create log file (if needed)
if _save_log:
s = gmdc_filename + '.import_log.txt'
log( 'Opening log file "%s" for writing... ' % s )
try:
f = open(s, 'w')
except IOError as e:
error(e)
display_menu('Error!', ['Could not open log file for writing.'])
return
# Ok
set_log_file(f)
#
# begin import
#
log( '==Geometry Data Container Importer======' )
log( 'GMDC file:', gmdc_filename )
log( 'CRES file:', cres_filename )
log( 'Settings:' )
log( '--Import bounding geometry:', settings['import_bmesh'] )
log( '--Remove doubles: ', settings['remove_doubles'] )
log( '--Import all bones: ', settings['all_bones'] )
log()
# load geometry
log( 'Opening GMDC file "%s"...' % gmdc_filename )
try:
res = load_resource(gmdc_filename, _save_log and 2 or 1)
except:
print_last_exception()
res = False
if not res or res.nodes[0].type != 'cGeometryDataContainer':
res and error( 'Not a GMDC file!' )
close_log_file()
display_menu('Error!', ['Could not load geometry file. See log for details.'])
return
geometry = res.nodes[0].geometry
log()
transform_tree = None
if cres_filename:
# load skeleton
log( 'Opening CRES file "%s"...' % cres_filename )
try:
res = load_resource(cres_filename, _save_log and 2 or 1)
if res and res.nodes[0].type == 'cResourceNode':
transform_tree = build_transform_tree(res.nodes)
else:
res and error( 'Not a CRES file!' )
except:
print_last_exception()
if not transform_tree:
close_log_file()
display_menu('Error!', ['Could not load resource node file. See log for details.'])
return
log()
if _save_log:
log( '==SKELETON==============================' )
log( transform_tree )
log()
try:
if settings['remove_doubles']:
log( 'Removing doubles...' )
geometry.remove_doubles()
log()
log( 'Creating objects...' )
create_objects(geometry, transform_tree, settings)
except:
print_last_exception()
display_menu('Error!', ['An error has occurred. See log for details.'])
else:
# Ok
log( 'Finished!' )
Blender.Redraw()
# exit prompt
if display_menu("Import complete!", ['Quit']) == 0: Draw.Exit()
finally:
close_log_file()
########################################
## GUI
########################################
def display_menu(caption, items, choice_required=False):
b = True
while b:
choice = Draw.PupMenu('%s%%t|'%caption + "|".join('%s%%x%i'%(s, i) for i, s in enumerate(items)), 0x100)
b = choice_required and choice < 0
return choice
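# (Blender 2.4x popup syntax: "Caption%t" sets the menu title, "label%x<n>" makes an
# item return n, and "|" separates entries; a dismissed menu returns a negative
# value, hence the choice_required retry loop above.)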
def draw_gui():
global str_gmdc_filename, str_cres_filename, btn_import_bmesh, btn_all_bones, btn_remove_doubles, btn_save_log
pos_y = 230 ; MAX_PATH = 200
# frame
Blender.BGL.glColor3f(0.75, 0.75, 0.75)
Blender.BGL.glRecti(10, 10, 430, pos_y)
pos_y-= 30
# plugin's header
s = "GMDC Importer (TS2)"
Blender.BGL.glColor3f(0.8, 0.8, 0.8)
Blender.BGL.glRecti(10, pos_y, 430, pos_y+30)
Draw.Label(s, 20, pos_y, 400, 30)
pos_y-= 30
# GMDC file selector
Draw.Label("GMDC file", 20, pos_y, 200, 20)
pos_y-= 20
Draw.BeginAlign()
str_gmdc_filename = Draw.String("", 0x10, 20, pos_y, 300, 20, str_gmdc_filename.val, MAX_PATH, "Path to GMDC file")
Draw.PushButton("Select file", 0x11, 320, pos_y, 100, 20, "Open file browser")
Draw.EndAlign()
pos_y-= 30
# resource node file selector
Draw.Label("Resource node file (optional)", 20, pos_y, 200, 20)
pos_y-= 20
Draw.BeginAlign()
str_cres_filename = Draw.String("", 0x20, 20, pos_y, 300, 20, str_cres_filename.val, MAX_PATH, "Path to resource node file (CRES; optional, but recommended)")
Draw.PushButton("Select file", 0x21, 320, pos_y, 100, 20, "Open file browser")
Draw.EndAlign()
pos_y-= 35
# options
Draw.BeginAlign()
btn_import_bmesh = Draw.Toggle("Bound. mesh", 0x31, 20, pos_y, 100, 20, btn_import_bmesh.val, "Import bounding geometry")
btn_remove_doubles = Draw.Toggle("Rm. doubles", 0x32, 120, pos_y, 100, 20, btn_remove_doubles.val, "If some vertices differ only in texture coordinates, then they are merged together (removes seams)")
btn_all_bones = Draw.Toggle("All bones", 0x33, 220, pos_y, 100, 20, btn_all_bones.val, "Import all bones/transforms; otherwise, used bones only")
btn_save_log = Draw.Toggle("Save log", 0x34, 320, pos_y, 100, 20, btn_save_log.val, "Write script's log data into file *.import_log.txt")
Draw.EndAlign()
pos_y-= 45
# buttons
Draw.BeginAlign()
Draw.PushButton("Import", 1, 120, pos_y, 100, 30, "Import geometry (Ctrl + Enter)")
Draw.PushButton("Exit", 0, 220, pos_y, 100, 30, "Terminate the script (Esc)")
Draw.EndAlign()
#---------------------------------------
# event handlers
l_ctrl_key_pressed = 0
r_ctrl_key_pressed = 0
def set_gmdc_filename(filename):
global gmdc_filename
str_gmdc_filename.val = filename
def set_cres_filename(filename):
global cres_filename
str_cres_filename.val = filename
def event_handler(evt, val):
global l_ctrl_key_pressed, r_ctrl_key_pressed
if evt == Draw.ESCKEY and val:
Draw.Exit()
elif evt == Draw.LEFTCTRLKEY: l_ctrl_key_pressed = val
elif evt == Draw.RIGHTCTRLKEY: r_ctrl_key_pressed = val
elif evt == Draw.RETKEY and val and (l_ctrl_key_pressed or r_ctrl_key_pressed):
begin_import()
l_ctrl_key_pressed = 0
r_ctrl_key_pressed = 0
def button_events(evt):
if evt == 0:
Draw.Exit()
elif evt == 1:
begin_import()
elif evt == 0x11:
Blender.Window.FileSelector(set_gmdc_filename, 'Select')
elif evt == 0x21:
Blender.Window.FileSelector(set_cres_filename, 'Select')
#-------------------------------------------------------------------------------
# set default values for gui elements and run event loop
str_gmdc_filename = Draw.Create("")
str_cres_filename = Draw.Create("")
btn_import_bmesh = Draw.Create(0)
btn_remove_doubles = Draw.Create(1)
btn_all_bones = Draw.Create(0)
btn_save_log = Draw.Create(0)
Draw.Register(draw_gui, event_handler, button_events)
| 1.726563 | 2 |
scripts/layers.py | hchoi405/dppm | 3 | 12795752 | <reponame>hchoi405/dppm
"""
BSD 2-Clause License
Copyright (c) 2021, CGLAB
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import copy
import tensorflow as tf
from tensorflow.keras import layers, activations
from tensorflow.keras.initializers import GlorotUniform, zeros
_module = tf.load_op_library('./_weightaverage_ops.so')
@tf.RegisterGradient("WeightedAverage")
def _weighted_average_grad(op, grad):
image = op.inputs[0]
weights = op.inputs[1]
grads = _module.weighted_average_grad(grad, image, weights)
grads = tf.clip_by_value(grads, -1000000, 1000000)
return [None, grads]
weighted_average = _module.weighted_average
def kernelPredictingWeights(z):
# z: (B, H, W, kernelArea)
# [-inf, 0], for numerical stability
w = z - tf.reduce_max(z)
# [0, 1]
w = activations.softmax(w)
return w
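# Illustrative check of the stabilisation above: for large logits such as
# z = [[1000., 1001., 1002.]], subtracting the max first gives [-2, -1, 0], so the
# softmax evaluates to roughly [0.09, 0.24, 0.67] instead of overflowing in exp().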
def conv2d(x, config):
return layers.Conv2D(filters=config['numFilters'],
kernel_size=(config['convSize'], config['convSize']),
activation=config["convActivation"],
padding='same',
strides=(1, 1),
kernel_initializer=GlorotUniform(),
bias_initializer=zeros())(x)
def conv2d_last(x, config):
return layers.Conv2D(
filters=config['numOutput'],
kernel_size=(config['convSize'], config['convSize']),
padding='same', strides=(1, 1),
kernel_initializer=GlorotUniform(),
bias_initializer=zeros())(x) # Constant to make initial radius to be 1
def ConvolutionNet(config, x):
# x: (B, H, W, numInputChannels)
# x: (Batch, H, W, 100)
x = conv2d(x, config)
for i in range(8):
# x: (Batch, H, W, 100)
x = conv2d(x, config)
# x: (Batch, H, W, numOutput)
x = conv2d_last(x, config)
return x
def MainNet(config, input):
# input: (B, H, W, numChannels)
N = input[:, :, :, config['ITERATION_POS']:config['ITERATION_POS'] + 1]
albedo = input[:, :, :, config['ALBEDO_POS']:config['ALBEDO_POS'] + 3]
normal = input[:, :, :, config['NORMAL_POS']:config['NORMAL_POS'] + 3]
depth = input[:, :, :, config['DEPTH_POS']:config['DEPTH_POS'] + 1]
var = input[:, :, :, config['VARIANCE_POS']:config['VARIANCE_POS'] + config['numCandidates']]
candidates = input[:, :, :, config['CANDIDATE_POS']
:config['CANDIDATE_POS'] + 3 * config['numCandidates']]
# x: (B, H, W, numInputChannels)
x = tf.concat([albedo, normal, depth, var, candidates], axis=3)
# x: (B, H, W, numOutput)
x = ConvolutionNet(config, x)
# (B, H, W, kernelArea * (numCandidates-1))
denoisingWeights = activations.relu(x) + 1e-4 # to prevent all zero
denoisingWeights = denoisingWeights / tf.reduce_sum(denoisingWeights, axis=-1, keepdims=True)
lastCandidIdx = 3 * (config['numCandidates'] - 1)
# (B, H, W, 3): the candidate with least radius
yi = candidates[:, :, :, lastCandidIdx:lastCandidIdx + 3]
# (B, H, W, 3)
output = tf.zeros_like(albedo)
for i in range(config['numCandidates'] - 1):
start = i * config['kernelArea']
end = (i + 1) * config['kernelArea']
# (B, H, W, kernelArea)
wb = denoisingWeights[:, :, :, start:end]
# (B, H, W, 3)
zb = candidates[:, :, :, i * 3:i * 3 + 3]
# (B, H, W, 3)
denoised = weighted_average(yi - zb, wb)
output += denoised
# (B, H, W, 1)
sumWeights = tf.reduce_sum(wb, axis=-1, keepdims=True)
output += zb * sumWeights
return output, denoisingWeights
| 1.351563 | 1 |
mapr/ojai/ojai_utils/ojai_list.py | mapr/maprdb-python-client | 3 | 12795753 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
class OJAIList(list):
    def __init__(self):
        super(OJAIList, self).__init__()

    @staticmethod
    def set_list(value, tags=False):
        from mapr.ojai.ojai.OJAITagsBuilder import OJAITagsBuilder
        ojai_list = []
        if tags:
            dump_document = OJAITagsBuilder()
        else:
            from mapr.ojai.ojai.OJAIDocument import OJAIDocument
            dump_document = OJAIDocument()
        for elem in value:
            if isinstance(elem, list):
                if isinstance(dump_document, OJAITagsBuilder):
                    nested_list = OJAIList.set_list(elem, tags=True)
                else:
                    nested_list = OJAIList.set_list(elem)
                ojai_list.append(nested_list)
            elif isinstance(elem, dict) and bool(elem):
                tmp_dict = {}
                for k, v in list(elem.items()):
                    if isinstance(v, list):
                        tmp_dict[k] = OJAIList.set_list(v)
                    else:
                        internal_value = dump_document.set('dump', v).as_dictionary()['dump']
                        tmp_dict[k] = internal_value
                        dump_document.clear()
                ojai_list.append(tmp_dict)
            else:
                ojai_list.append(dump_document.set('dump', elem).as_dictionary()['dump'])
                dump_document.clear()
        return ojai_list
| 2.171875 | 2 |
estimagic/__init__.py | vishalbelsare/estimagic | 0 | 12795754 | from estimagic import utilities
from estimagic.differentiation.derivatives import first_derivative
from estimagic.estimation.estimate_msm import estimate_msm
from estimagic.estimation.msm_weighting import get_moments_cov
from estimagic.inference.bootstrap import bootstrap
from estimagic.optimization.optimize import maximize
from estimagic.optimization.optimize import minimize
__version__ = "0.1.4"
__all__ = [
    "maximize",
    "minimize",
    "utilities",
    "first_derivative",
    "bootstrap",
    "estimate_msm",
    "get_moments_cov",
]
| 1.429688 | 1 |
2018/07/2018_07_30.py | devsagul/daily-coding-problem | 0 | 12795755 | """
Given an integer k and a string s, find the length of the longest substring that contains at most k distinct characters.
For example, given s = "abcba" and k = 2, the longest substring with k distinct characters is "bcb".
"""
def longest_k(s, k):
    res = s[:k]
    l_res = k
    cur = res
    letters = set(cur)
    num_letters = len(letters)
    s_size = len(s)
    i = k
    while i < len(s):
        letter = s[i]
        if letter not in letters and num_letters >= k:
            letters.remove(cur[0])
            cur = cur.lstrip(cur[0])
        else:
            num_letters += 1
            letters.update(letter)
        tmp = s[i:].lstrip(letter)
        chunk_size = s_size - i - len(tmp)
        cur += chunk_size * letter
        i += chunk_size
        l_cur = len(cur)
        if l_cur > l_res:
            l_res = l_cur
            res = cur
    return res
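
# A more conventional sliding-window sketch of the same idea (illustrative only;
# longest_k_window is not part of the original solution). It grows the window to
# the right and shrinks it from the left whenever more than k distinct characters
# are inside, tracking the longest window seen.
def longest_k_window(s, k):
    from collections import Counter
    counts = Counter()
    best = ""
    left = 0
    for right, ch in enumerate(s):
        counts[ch] += 1
        while len(counts) > k:
            counts[s[left]] -= 1
            if counts[s[left]] == 0:
                del counts[s[left]]
            left += 1
        if right - left + 1 > len(best):
            best = s[left:right + 1]
    return best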
if __name__ == '__main__':
    tests = [["abcba", 2],
             ["contains", 4],
             ]
    answers = ["bcb",
               "ntain",
               ]
    for test, answer in zip(tests, answers):
        actual = longest_k(*test)
        message = "Failed test {0}\ngot {1}" \
                  " expected {2}".format(test, actual, answer)
        assert actual == answer, message | 3.9375 | 4 |
setup.py | AllenCellModeling/CVAE_testbed | 2 | 12795756 | <filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
test_requirements = ["codecov", "flake8", "pytest", "pytest-cov", "pytest-raises"]
setup_requirements = ["pytest-runner"]
dev_requirements = [
"bumpversion>=0.5.3",
"wheel>=0.33.1",
"flake8>=3.7.7",
"tox>=3.5.2",
"coverage>=5.0a4",
"Sphinx>=2.0.0b1",
"sphinx_rtd_theme>=0.3.1",
"recommonmark>=0.5.0",
"twine>=1.13.0",
"pytest>=4.3.0",
"pytest-cov==2.6.1",
"pytest-raises>=0.10",
"pytest-runner>=4.4",
]
interactive_requirements = ["altair", "jupyterlab", "matplotlib"]
with open("requirements.txt", "r") as f:
requirements = f.read().splitlines()
extra_requirements = {
"test": test_requirements,
"setup": setup_requirements,
"dev": dev_requirements,
"interactive": interactive_requirements,
"all": [
*requirements,
*test_requirements,
*setup_requirements,
*dev_requirements,
*interactive_requirements,
],
}
setup(
author="<NAME>",
author_email="<EMAIL>",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: Allen Institute Software License",
"Natural Language :: English",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
description="A research testbed for conditional variational autoencoders",
entry_points={
"console_scripts": ["CVAE_train=CVAE_testbed.bin.CVAE_train:train_model"]
},
install_requires=requirements,
license="Allen Institute Software License",
long_description=readme + "\n\n" + history,
include_package_data=True,
keywords="CVAE_testbed",
name="CVAE_testbed",
packages=find_packages(),
python_requires=">=3.6",
setup_requires=setup_requirements,
test_suite="CVAE_testbed/CVAE_testbed/tests",
tests_require=test_requirements,
extras_require=extra_requirements,
url="https://github.com/AllenCellModeling/CVAE_testbed",
version="0.1.0",
zip_safe=False,
)
| 1.640625 | 2 |
lists as stacks and queues exerscise/01.Basic Stack Operations.py | nrgxtra/advanced | 0 | 12795757 | <gh_stars>0
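# Expected I/O, as inferred from the code below: the first line gives N (values to
# push), S (pops to perform) and X (a value to look for); the second line gives the
# N values. After S pops the script prints "True" if X is still on the stack,
# otherwise the smallest remaining value, or "0" if the stack is empty.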
n, s, x = input().split(' ')
st = []
[st.append(int(x)) for x in input().split(' ')]
for j in range(int(s)):
    st.pop()
if int(x) in st:
    print('True')
else:
    st = sorted(st)
    if st:
        print(st[0])
    else:
        print('0')
| 3.109375 | 3 |
tests/test_custom_widgets.py | jerryc05/python-progressbar | 806 | 12795758 | <gh_stars>100-1000
import time
import progressbar
class CrazyFileTransferSpeed(progressbar.FileTransferSpeed):
"It's bigger between 45 and 80 percent"
def update(self, pbar):
if 45 < pbar.percentage() < 80:
return 'Bigger Now ' + progressbar.FileTransferSpeed.update(self,
pbar)
else:
return progressbar.FileTransferSpeed.update(self, pbar)
def test_crazy_file_transfer_speed_widget():
widgets = [
# CrazyFileTransferSpeed(),
' <<<',
progressbar.Bar(),
'>>> ',
progressbar.Percentage(),
' ',
progressbar.ETA(),
]
p = progressbar.ProgressBar(widgets=widgets, max_value=1000)
# maybe do something
p.start()
for i in range(0, 200, 5):
# do something
time.sleep(0.1)
p.update(i + 1)
p.finish()
def test_variable_widget_widget():
widgets = [
' [', progressbar.Timer(), '] ',
progressbar.Bar(),
' (', progressbar.ETA(), ') ',
progressbar.Variable('loss'),
progressbar.Variable('text'),
progressbar.Variable('error', precision=None),
progressbar.Variable('missing'),
progressbar.Variable('predefined'),
]
p = progressbar.ProgressBar(widgets=widgets, max_value=1000,
variables=dict(predefined='predefined'))
p.start()
print('time', time, time.sleep)
for i in range(0, 200, 5):
time.sleep(0.1)
p.update(i + 1, loss=.5, text='spam', error=1)
i += 1
p.update(i, text=None)
i += 1
p.update(i, text=False)
i += 1
p.update(i, text=True, error='a')
p.finish()
def test_format_custom_text_widget():
widget = progressbar.FormatCustomText(
'Spam: %(spam).1f kg, eggs: %(eggs)d',
dict(
spam=0.25,
eggs=3,
),
)
bar = progressbar.ProgressBar(widgets=[
widget,
])
for i in bar(range(5)):
widget.update_mapping(eggs=i * 2)
assert widget.mapping['eggs'] == bar.widgets[0].mapping['eggs']
| 2.9375 | 3 |
turdshovel/context.py | daddycocoaman/turdshovel | 39 | 12795759 | import os
from copy import copy
from typing import Any, List, Tuple
from nubia import context, eventbus
from nubia.internal import cmdloader
from nubia.internal.cmdbase import AutoCommand
from pygments.token import Name, Token
from rich import box, inspect
from rich.align import Align
from rich.console import Console
from rich.panel import Panel
from rich.text import Text
from sortedcontainers import SortedSet
from ._nubia import _Exit, _Help
from .constants import COMMAND_PACKAGES, NUBIA_OPTIONS, TITLE_ASCII, TITLE_TEXT
class TurdshovelContext(context.Context):
"""Context for the Turdshovel app. Only allows interactive mode"""
# Need to set this to allow initialization
available_obj_types = SortedSet()
def get_prompt_tokens(self) -> List[Tuple[Any, str]]:
tokens = [
(Token.NewLine, "\n"),
(Token.Title, "Turdshovel"),
(Token.Space, ""),
(Token.Pound, "> "),
]
if self.target_friendly_name:
tokens.insert(3, (Name.Command, self.target_friendly_name))
tokens.insert(3, (Token.At, "@"))
return tokens
def _replace_internal_cmds(self, override: bool):
for k, v in copy(self._registry._cmd_instance_map).items():
if v.__module__.startswith("nubia.internal.commands"):
self._registry._cmd_instance_map.pop(k)
self._registry._completer.meta_dict.pop(k)
self._registry._completer.words.remove(k)
# Readd commands for exit and help with less aliases
for cmd in [_Exit, _Help]:
self._registry.register_command(cmd(), override)
def reload_commands(self):
"""Reloads all the commands for the context"""
self._replace_internal_cmds(override=True)
for cmd in cmdloader.load_commands(COMMAND_PACKAGES):
self._registry.register_command(
AutoCommand(cmd, NUBIA_OPTIONS), override=True
)
def on_interactive(self, args):
self.verbose = args.verbose
self.console = Console(soft_wrap=True)
self.console.set_alt_screen()
# This will be whatever the DataTarget is connected to and the related runtime
self.target = None
self.target_friendly_name = ""
self.runtime = None
self.available_obj_types = SortedSet()
title_panel = Panel.fit(
Text(TITLE_ASCII.rjust(33), style="bold #52311A", end="").append(
TITLE_TEXT, style="bold #693F21"
),
border_style="bold #52311A",
subtitle=f"{':poop:' * 36}",
box=box.SIMPLE,
)
self.console.print(Align.center(title_panel))
self._replace_internal_cmds(override=False)
self.registry.dispatch_message(eventbus.Message.CONNECTED)
| 1.976563 | 2 |
load_data/loader/videogames/pokemon_image_type.py | erickfmm/ML-experiments | 0 | 12795760 | # -*- coding: utf-8 -*-
from pims import ImageReader
from load_data.ILoadSupervised import ILoadSupervised
from os.path import join, exists
import csv
class LoadPokemon(ILoadSupervised):
    def __init__(self, path="train_data/Folder_Videojuegos/pokemon-images-and-types"):
        self.path = path
        self.classes = set()

    def get_all(self, sum1=False):
        X = []
        Y = []
        Ys_not_processed = []
        with open(join(self.path, "pokemon.csv"), "r") as csv_obj:
            csv_reader = csv.DictReader(csv_obj)
            for row in csv_reader:
                imagename = join(self.path, "images", row["Name"]+".png")
                if exists(imagename):
                    im = ImageReader(imagename)
                    X.append(im.get_frame(0))
                    self.classes.add(row["Type1"])
                    actual_ys = []
                    actual_ys.append(row["Type1"])
                    if row["Type2"]:  # csv.DictReader yields '' for a missing second type
                        self.classes.add(row["Type2"])
                        actual_ys.append(row["Type2"])
                    Ys_not_processed.append(actual_ys)
        Y = self.make_targets(Ys_not_processed, sum1)
        return X, Y

    def make_targets(self, not_processed, sum1=False):
        Y = []
        lcl = list(self.classes)
        for e in not_processed:
            target = [0 for _ in self.classes]
            for pktype in e:
                target[lcl.index(pktype)] = 1
            Y.append(target)
        if sum1:
            for i in range(len(Y)):
                sum_i = sum(Y[i])
                Y[i] = [e/float(sum_i) for e in Y[i]]
        return Y

    def get_classes(self):
        return self.classes

    def get_headers(self):
        return ["image"]# None #self.headers | 2.71875 | 3 |
tests/structural/structural_test.py | lukaschoebel/POTUSgen | 0 | 12795761 | <filename>tests/structural/structural_test.py
import unittest
from timeout_decorator import *
from behavior.ngram_solutions import *
from structural import structural_helpers

try:
    from assignment import POTUSgen
    importFlag = True
except:
    importFlag = False


class TestStructural(unittest.TestCase):
    TIMEOUT_CONSTANT = 180
    time_error = f"Importing the notebook took more than {TIMEOUT_CONSTANT} seconds. This is longer than expected. Please make sure that every cell is compiling and prevent complex structures."
    import_error = "There seems to be an error in the provided notebook. Please make sure that every cell is compiling without an error."
    method_error = "Function %s could not be found. Please don\'t rename the methods."

    @timeout_decorator.timeout(TIMEOUT_CONSTANT, timeout_exception=TimeoutError, exception_message=time_error)
    def test_notebook_import(self):
        if (importFlag is False):
            raise ImportError(self.import_error)
        else:
            pass

    def test_check_function_names(self):
        self.assertIs(structural_helpers.check_for_function('generate_ngram_successors', POTUSgen), True,
                      self.method_error % ('generate_ngram_successors'))
        self.assertIs(structural_helpers.check_for_function('calculate_ngram_freqs', POTUSgen), True,
                      self.method_error % ('calculate_ngram_freqs'))
        self.assertIs(structural_helpers.check_for_function('next_word_max', POTUSgen), True,
                      self.method_error % ('next_word_max')) | 2.71875 | 3 |
token_service/authz_client.py | WIPACrepo/token-service | 0 | 12795762 | <reponame>WIPACrepo/token-service
"""
Authz client common code.
"""
import asyncio
import inspect
from tornado.web import HTTPError
from rest_tools.server import (Auth, RestHandler, RestServer, authenticated,
                               catch_error)


class AuthzHandler(RestHandler):
    def initialize(self, func, **kwargs):
        super(AuthzHandler, self).initialize(**kwargs)
        self.func = func

    @authenticated
    @catch_error
    async def get(self):
        try:
            if inspect.iscoroutinefunction(self.func):
                ret = await self.func(self.auth_data)
            else:
                ret = self.func(self.auth_data)
        except Exception:
            raise HTTPError(401, 'denied')
        if not ret:
            ret = {}
        self.write(ret)


def run(client_secret, handler_func, address=None, port=None, **kwargs):
    """
    Run an Authz client.

    Starts a web server that responds to authz requests from the
    token service. This function blocks.

    Notes on handler_func:
        This callable should expect a dict argument with additional data.
        Any information returned is embedded in the valid token.
        It should raise an error to deny the authz request.

    Args:
        client_secret (str): a secret string used to validate/sign requests
        handler_func (callable): a function to handle the authz request
        address (str): bind address
        port (int): bind port
    """
    auth = Auth(client_secret, issuer='authz')
    server = RestServer(**kwargs)
    server.add_route('/', AuthzHandler, {'auth': auth, 'func': handler_func})
    startup_args = {}
    if address:
        startup_args['address'] = address
    if port:
        startup_args['port'] = port
    server.startup(**startup_args)
    loop = asyncio.get_event_loop()
    loop.run_forever()
    server.stop()
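
# Minimal usage sketch (the secret, claim name, bind address and port below are
# illustrative placeholders, not values defined by this module).
if __name__ == '__main__':
    def example_handler(data):
        # Deny the request unless the token data carries the expected claim.
        if data.get('username') != 'example-user':
            raise ValueError('authorization denied')
        return {'scope': 'read'}

    run('example-client-secret', example_handler, address='127.0.0.1', port=8888)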
| 2.625 | 3 |
cogs/GameManager.py | shadowlerone/TabletopClubDiscordBot | 0 | 12795763 | import discord
from discord.ext import commands
import random
import asyncio
class GameManager():
def __init__(self):
self.setup()
def setup(self):
print("GameManager: Loaded")
class GameManagerCog(commands.Cog):
def __init__(self, client):
self.client = client
self.initiatives = {}
self.gamemanager = GameManager()
# Official Format:
# Test (category channel)
# t-session-planning (text channel)
# t-notes (text-channel)
# t-stars-and-wishes (text channel)
# t-pc-basics (text channel)
# t-pc-sheets (text channel)
# t-pc-visuals (text channel)
# t-music (text channel)
# t-dice-rolls (text channel)
# t-voice-chat (text channel)
# T Sessions (voice channel)
# Makes a game (category, channel, role, etc) in the server
@commands.command(aliases=['Creategame','CreateGame','cg','Cg','cG','CG','gamecreate','Gamecreate','GameCreate','gc','Gc','gC','GC'],brief="Makes the necessary channels and roles for a game.", description="/creategame [arg1] [arg2] @member\n\n- arg1 = Game Name/Campaign\n- arg2 = Game Name Abbreviation\n- @member = Game Master\n\nMakes the necessary channels and roles for a game.")
@commands.has_role("Mod")
async def creategame(self, ctx, arg1=None, arg2=None, gm: discord.Member = None):
if(arg1 != None and arg2 != None and gm != None):
# Stuff
guild = ctx.guild
progress_msg = await ctx.send("Making...")
pos = discord.utils.get(ctx.guild.roles, name="⊱ ───── {⭒|PERSONAL|⭒} ───── ⊰").position +2
member = discord.utils.get(ctx.guild.roles, name="Member")
role = await guild.create_role(name=str(arg1), mentionable=True)
await role.edit(position=pos)
await gm.add_roles(role)
overwrites = {
guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False),
role: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None),
member: discord.PermissionOverwrite(add_reactions=True, administrator=None, attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=None,view_guild_insights=None),
gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None)
}
category = await guild.create_category_channel(str(arg1))
await category.create_text_channel(str(arg2) + " session planning", overwrites=overwrites)
await category.create_text_channel(str(arg2) + " notes", overwrites=overwrites)
await category.create_text_channel(str(arg2) + " star and wishes", overwrites=overwrites)
await category.create_text_channel(str(arg2) + " house rules", overwrites=overwrites)
await category.create_text_channel(str(arg2) + " pc basics", overwrites=overwrites)
await category.create_text_channel(str(arg2) + " pc sheets", overwrites=overwrites)
await category.create_text_channel(str(arg2) + " pc visuals", overwrites=overwrites)
await category.create_text_channel(str(arg2) + " music", overwrites=overwrites)
await category.create_text_channel(str(arg2) + " dice rolls", overwrites=overwrites)
overwrites = {
guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False),
role: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None),
member: discord.PermissionOverwrite(add_reactions=True, administrator=None, attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=True,view_guild_insights=None),
gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None)
}
await category.create_text_channel(str(arg2) + " voice chat", overwrites=overwrites)
await category.create_voice_channel(str(arg2).upper() + " Sessions", overwrites=overwrites)
await progress_msg.delete()
await ctx.send("Done!")
else:
await ctx.send("Missing arguments!")
@commands.command(aliases=['Deletegame','DeleteGame','dg','Dg','dG','DG','gamedelete','Gamedelete','GameDelete','gd','Gd','gD','GD'],brief="Deletes the appropriate channels and roles for a game.", description="/deletegame [arg]\n\n- arg = Game Name/Campaign\n\nDeletes the appropriate channels and roles for a game.")
@commands.has_role("Mod")
async def deletegame(self, ctx, arg1=None):
if(arg1 != None):
# Stuff
msg = await ctx.send("Are you sure you want to delete " + str(arg1) + "?")
await msg.add_reaction("✅")
await msg.add_reaction("❌")
def check(reaction, user):
return user == ctx.author
try:
reaction = await self.client.wait_for('reaction_add', timeout=60.0, check=check)
if(str(reaction[0]) == '✅'):
# Stuff
channel = discord.utils.get(ctx.guild.channels, name=str(arg1))
role = discord.utils.get(ctx.guild.roles, name=str(arg1))
await role.delete()
category = self.client.get_channel(channel.id)
for channel in category.channels:
await channel.delete()
await category.delete()
await msg.delete()
await ctx.send("Successfully deleted!")
elif(str(reaction[0]) == '❌'):
#More Stuff
await msg.delete()
await ctx.send("Deletion Aborted!")
else:
await ctx.send("That isn't right...")
except asyncio.TimeoutError:
await msg.delete()
await ctx.send("Timed out!")
else:
await ctx.send("Missing arguments!")
@commands.command(aliases=['Initiative','init','Init','i','I','initiate','Initiate'],brief="Allows you to set the current initiative for a game that can be used as a reminder.", description="/initiative [args]\n\n- args = Names separated by spaces to indicate order of initiative\n\nAllows you to set the current initiative for a game that can be used as a reminder.")
async def initiative(self, ctx, *args):
if(len(args) != 0):
if(str(args).isdecimal()):
await ctx.send("You can't have just a number for a name, sorry :(")
else:
game = ctx.channel.category_id
self.initiatives[game] = [arg for arg in args]
await ctx.send("Initiative saved!")
else:
game = ctx.channel.category_id
msg = "```Initiative:\n"
counter = 1
for arg in self.initiatives[game]:
msg += "{}) {}\n".format(counter, arg)
counter+=1
msg += "```"
# print(self.initiatives[game])
await ctx.send(msg)
@commands.command(aliases=['Addplayer','AddPlayer','initadd','Initadd','InitAdd'],brief='Adds a player to the initiative.', description='/addplayer [name] [idx]\n\n- name = The name of the player you are adding to the initiative\n- idx = Where in the list the player will go (optional).\n\nAdds a player to the initiative.')
async def addplayer(self, ctx, name:str, idx=None):
game = ctx.channel.category_id
if(idx != None):
if(not name.isdecimal()):
self.initiatives[game].insert(int(idx)-1, name)
await ctx.send("Successfully added player!")
else:
await ctx.send("No number for name >:T")
else:
if(not name.isdecimal()):
self.initiatives[game].append(name)
await ctx.send("Successfully added player!")
else:
await ctx.send("No number for name! >:T")
@commands.command(aliases=['Removeplayer','RemovePlayer','initdel','Initdel','InitDel'],brief='Removes a player from the initiative.', description="/removeplayer [arg]\n\n- arg = The index or name of the player you'd like to remove from initiative.\n\nRemoves a player from the initiative.")
async def removeplayer(self, ctx, arg):
game = ctx.channel.category_id
if(str(arg).isdecimal()):
del self.initiatives[game][int(arg)-1]
await ctx.send("Successfully removed player!")
else:
del self.initiatives[game][self.initiatives[game].index(str(arg))]
await ctx.send("Successfully removed player!")
def setup(client):
    client.add_cog(GameManagerCog(client)) | 2.828125 | 3 |
ivfcrvis/recording.py | tim-shea/buckeye_vis | 1 | 12795764 | <filename>ivfcrvis/recording.py
import numpy
import os
import math
from xml.etree import ElementTree
from scipy.io import wavfile
from matplotlib import pyplot
from features import logfbank
class Recording:
"""Recording reads an ITS file exported from LENA and parses out data about the segments and speakers in the
corresponding WAV file. It also contains a method to split and save out individual segments as WAV files for
acoustic analysis."""
def __init__(self, root, recording_id):
"""Construct a new Recording by reading the ITS file in the directory root with a filename derived from
recording_id."""
self.root = root
self.recording_id = recording_id
starts = []
ends = []
speakers = []
tree = ElementTree.parse(os.path.join(self.root, '{0}.its'.format(self.recording_id)))
root = tree.getroot()
for segment in root.iter('Segment'):
speakers.append(segment.attrib['spkr'])
starts.append(parse_time(segment.attrib['startTime']))
ends.append(parse_time(segment.attrib['endTime']))
self.starts = numpy.array(starts)
self.ends = numpy.array(ends)
self.speakers = speakers
self.samplerate = None
self.signal = None
self.duration = None
def read_recording(self):
"""Read the WAV file corresponding to this Recording. This is deferred because it can be slow."""
filepath = os.path.join(self.root, '{0}.wav'.format(self.recording_id))
self.samplerate, self.signal = wavfile.read(filepath)
self.duration = len(self.signal) / self.samplerate
def frequency_banks(self, blockSize=600):
if self.signal is None:
self.read_recording()
fbanks = numpy.zeros((0, 1, 26))
start = 0
while start < len(self.signal):
end = start + blockSize * self.samplerate
end = end if end < len(self.signal) else len(self.signal)
block = self.signal[start:end]
fbank = logfbank(block, self.samplerate, winlen=0.05, winstep=0.025)
fbanks = numpy.concatenate((fbanks, numpy.reshape(fbank, (len(fbank), 1, 26))))
start = end
return fbanks
def split_segments(self):
"""Split the WAV file for this recording into individual segments and save those segments in a directory
structure according to the identified speaker."""
recording_dir = os.path.join(self.root, self.recording_id)
if not os.path.exists(recording_dir):
os.makedirs(recording_dir)
for speaker in set(self.speakers):
speaker_dir = os.path.join(recording_dir, speaker)
if not os.path.exists(speaker_dir):
os.makedirs(speaker_dir)
if self.signal is None:
self.read_recording()
for start, end, speaker, i in zip(self.starts, self.ends, self.speakers, range(len(self.starts))):
segment = self.signal[int(start * self.samplerate):int(end * self.samplerate)]
wavfile.write(os.path.join(recording_dir, speaker, '{0}.wav'.format(i)), self.samplerate, segment)
def read_segment(self, category, i):
"""Read an individual segment WAV file. Returns the sample rate and signal."""
filename = os.path.join(self.root, self.recording_id, category, '{0}.wav'.format(i))
return wavfile.read(filename)
def filter_speaker(self, speaker):
"""Return the indices, start times, and end times of all segments labeled with the speaker."""
index = numpy.array(self.speakers) == speaker
return numpy.where(index)[0], self.starts[index], self.ends[index]
def parse_time(formatted):
"""Returns the time in seconds indicated by the formatted string."""
# TODO: This should not require Pacific timezone, lookup lena format spec
if formatted.startswith('PT') and formatted.endswith('S'):
return float(formatted[2:-1])
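# e.g. parse_time('PT1695.28S') -> 1695.28 (seconds); strings in any other format
# fall through and the function implicitly returns None.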
def plot_speaker_counts(recording):
"""Plot the number of segments in the recording for each speaker."""
speakers, counts = numpy.unique(recording.speakers, return_counts=True)
fig = pyplot.figure()
pyplot.bar(numpy.arange(len(speakers)) + 0.1, counts)
pyplot.title('Number of Vocalizations by Speaker')
pyplot.xticks(numpy.arange(len(speakers)) + 0.5, speakers)
pyplot.xlim(0, len(speakers))
pyplot.xlabel('Speaker')
pyplot.ylabel('Count')
return fig
def plot_durations(recording, speaker=None):
"""Plot a time series and a histogram of segment durations, optionally filtered for a speaker."""
if speaker is None:
starts = recording.starts
ends = recording.ends
else:
i, starts, ends = recording.filter_speaker(speaker)
durations = ends - starts
fig = pyplot.figure()
pyplot.subplot(2, 1, 1)
pyplot.plot(starts + durations / 2, durations)
pyplot.title('Vocalization Durations for {0}'.format('ALL' if speaker is None else speaker))
pyplot.xlabel('Time (s)')
pyplot.ylabel('Duration (s)')
pyplot.subplot(2, 1, 2)
pyplot.hist(durations, bins=numpy.logspace(0, 4, 100))
pyplot.xscale('log')
pyplot.yscale('log')
pyplot.xlabel('Duration (s)')
pyplot.ylabel('Count')
return fig
def plot_intervals(recording, speaker):
"""Plot a time series and histogram of segment intervals labeled as speaker."""
i, starts, ends = recording.filter_speaker(speaker)
intervals = starts[1:] - ends[:-1]
fig = pyplot.figure()
pyplot.subplot(2, 1, 1)
pyplot.plot(starts[1:], intervals)
pyplot.title('Vocalization Intervals for {0}'.format(speaker))
pyplot.xlabel('Time (s)')
pyplot.ylabel('Interval (s)')
pyplot.subplot(2, 1, 2)
pyplot.hist(intervals, bins=numpy.logspace(0, 4, 50))
pyplot.xscale('log')
pyplot.yscale('log')
pyplot.xlabel('Interval (s)')
pyplot.ylabel('Count')
return fig
def plot_volubility(recording, speaker):
"""Plot the volubility ratio (proportion of time that speaker is speaking) as a time series and histogram. This
analysis uses one minute blocks to aggregate segments."""
minutes = math.ceil((recording.ends[-1] - recording.starts[0]) / 60)
volubility = numpy.zeros(minutes)
i, starts, ends = recording.filter_speaker(speaker)
for m in range(minutes):
start_minute = 60 * m
end_minute = 60 * m + 60
for start, end in zip(starts, ends):
volubility[m] += max(min(end_minute, end) - max(start_minute, start), 0)
volubility /= 60
fig = pyplot.figure()
pyplot.subplot(2, 1, 1)
pyplot.plot(60 * numpy.arange(minutes), volubility)
pyplot.title('Volubility for {0}'.format(speaker))
pyplot.xlabel('Time (min)')
pyplot.ylabel('Vocalized Seconds / Minute')
pyplot.subplot(2, 1, 2)
pyplot.hist(volubility, bins=50)
pyplot.yscale('log')
pyplot.xlabel('Volubility')
pyplot.ylabel('Count')
return fig
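
# Minimal usage sketch (the directory, recording id and the 'CHN' speaker label are
# illustrative placeholders; a real LENA export provides "<id>.its" and "<id>.wav"
# side by side in the root directory).
if __name__ == '__main__':
    rec = Recording('recordings', 'example_recording')
    rec.split_segments()                # write one WAV per segment, grouped by speaker
    plot_speaker_counts(rec)            # bar chart of segments per speaker
    plot_durations(rec, speaker='CHN')  # duration series/histogram for one speaker
    pyplot.show()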
| 2.890625 | 3 |
touchdown/tests/stubs/aws/launch_configuration.py | yaybu/touchdown | 14 | 12795765 | # Copyright 2016 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
from touchdown.core.utils import force_bytes, force_str
from .service import ServiceStubber
class LaunchConfigurationStubber(ServiceStubber):
client_service = "ec2"
def add_describe_launch_configurations_empty_response(self):
return self.add_response(
"describe_launch_configurations",
service_response={"LaunchConfigurations": []},
expected_params={},
)
def add_describe_launch_configurations_one_response(self, user_data=None):
launch_config = {
"LaunchConfigurationName": self.resource.name,
"ImageId": "ami-cba130bc",
"InstanceType": "t2.micro",
"CreatedTime": datetime.datetime.now(),
}
if user_data:
launch_config["UserData"] = force_str(
base64.b64encode(force_bytes(user_data))
)
return self.add_response(
"describe_launch_configurations",
service_response={"LaunchConfigurations": [launch_config]},
expected_params={},
)
def add_describe_auto_scaling_groups(self):
return self.add_response(
"describe_auto_scaling_groups",
service_response={"AutoScalingGroups": []},
expected_params={},
)
def add_create_launch_configuration(self, user_data=None):
expected_params = {
"ImageId": "ami-cba130bc",
"InstanceMonitoring": {"Enabled": False},
"InstanceType": "t2.micro",
"LaunchConfigurationName": "my-test-lc.1",
}
if user_data:
expected_params["UserData"] = user_data
return self.add_response(
"create_launch_configuration",
service_response={},
expected_params=expected_params,
)
def add_delete_launch_configuration(self):
return self.add_response(
"delete_launch_configuration",
service_response={},
expected_params={"LaunchConfigurationName": self.resource.name},
)
| 1.953125 | 2 |
loop2.py | musaibnazir/MixedPy | 0 | 12795766 | <reponame>musaibnazir/MixedPy
num = 5
for i in range(0, num):
    for j in range(0, num-i-1):
        print(end=" ")
    for j in range(1, i+1):
        print(j, " ", end="")
    print()
| 3.90625 | 4 |
415-add-strings/415-add-strings.py | yuzhengcuhk/MyLeetcodeRecord | 3 | 12795767 | <reponame>yuzhengcuhk/MyLeetcodeRecord<gh_stars>1-10
class Solution:
    def addStrings(self, num1: str, num2: str) -> str:
        intNum1 = 0
        intNum2 = 0
        for i in num1:
            intNum1 = intNum1 * 10 + int(i)
        for i in num2:
            intNum2 = intNum2 * 10 + int(i)
        result = str(intNum1 + intNum2)
        return result | 2.6875 | 3 |
lambdapool/cli.py | rorodata/lambdapool | 0 | 12795768 | import sys
import click
from .function import LambdaPoolFunction
from . import utils
from tabulate import tabulate
from lambdapool import exceptions
@click.group()
def cli():
pass
@cli.command()
@click.option('--requirements', '-r', type=click.Path(exists=True), help="Specifies the dependencies to be installed along with the function")
@click.option('--memory', type=click.INT, help="Sets the memory size of the function environment")
@click.option('--timeout', type=click.INT, help="Sets the timeout for the function in seconds")
@click.option('--layers', help="Sets the layers to be used when the function is run. The layer ARNs (a maximum of 5) should be specified.")
@click.argument('function_name', nargs=1)
@click.argument('paths', nargs=-1, type=click.Path(exists=True))
def create(function_name, paths, requirements, memory, timeout, layers):
"""Create a new function"""
click.echo('=== Creating lambdapool function ===')
try:
func = LambdaPoolFunction(
function_name=function_name,
paths=paths,
requirements=requirements,
memory=memory,
timeout=timeout,
layers=layers.split(',') if layers else []
)
if func.exists():
click.echo(f'lambdapool function {function_name} already exists')
sys.exit(1)
func.create()
except exceptions.LambdaFunctionError as e:
click.echo(f'ERROR: {e}')
sys.exit(1)
    click.echo(f'=== Successfully created lambdapool function {function_name} ===')
@cli.command()
def list():
"""List all deployed functions"""
funcs = LambdaPoolFunction.list()
funcs = sorted(funcs, key=lambda x: x['last_updated'], reverse=True)
rows = []
for func in funcs:
rows.append(
[
func['function_name'],
utils.convert_size(func['size']),
utils.datestr(func['last_updated']),
func['memory'],
func['timeout']
]
)
click.echo(tabulate(rows, headers=['FUNCTION NAME', 'SIZE', 'WHEN', 'RUNTIME MEMORY (MB)', 'TIMEOUT (SEC)']))
@cli.command()
@click.option('--requirements', '-r', type=click.Path(exists=True), help="Specifies the dependencies to be installed along with the function")
@click.option('--memory', type=click.INT, help="Sets the memory size of the function environment")
@click.option('--timeout', type=click.INT, help="Sets the timeout for the function in seconds")
@click.option('--layers', help="Sets the layers to be used when the function is run. The layer ARNs (a maximum of 5) should be specified.")
@click.argument('function_name', nargs=1)
@click.argument('paths', nargs=-1)
def update(function_name, paths, requirements, memory, timeout, layers):
"""Update an existing function"""
click.echo('=== Updating lambdapool function ===')
try:
func = LambdaPoolFunction(
function_name=function_name,
paths=paths,
requirements=requirements,
memory=memory,
timeout=timeout,
layers=layers.split(',') if layers else []
)
func.update()
except exceptions.LambdaFunctionError as e:
click.echo(f'ERROR: {e}')
sys.exit(1)
click.echo(f'=== Updated lambdapool function {function_name} ===')
@cli.command()
@click.argument('function_name', nargs=1)
def delete(function_name):
"""Delete a function"""
click.echo('=== Deleting lambdapool function ===')
func = LambdaPoolFunction(function_name=function_name)
func.delete()
    click.echo(f'=== Deleted lambdapool function {function_name} ===')
| 2.5625 | 3 |
Algorithms/Dynamic_Programming/0-1_Knapsack_Problem/knapsack_problem_0_1.py | arslantalib3/algo_ds_101 | 182 | 12795769 | <reponame>arslantalib3/algo_ds_101<filename>Algorithms/Dynamic_Programming/0-1_Knapsack_Problem/knapsack_problem_0_1.py
#0/1 Knapsack problem
def knapsack(val, wt, N, C):
table = [[ 0 for _ in range(0, C+1)] for _ in range(0, N+1)]
table[0][0] = 0
for i in range(1, N+1):
for c in range(1, C+1):
if c - wt[i-1] < 0:
table[i][c] = table[i-1][c]
else:
table[i][c] = max(table[i-1][c], table[i-1][c-wt[i-1]]+val[i-1])
return table[N][C]
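# Added illustrative check (a hedged example, not part of the original exercise): for the
# classic instance val=[60, 100, 120], wt=[10, 20, 30], N=3 and capacity C=50 the optimal
# value is 220 (taking the 100- and 120-value items), so the DP table should report 220.
assert knapsack([60, 100, 120], [10, 20, 30], 3, 50) == 220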
N = int(input().strip())
W = int(input().strip()) # capacity
val = [ int(v) for v in input().strip().split(" ")]
wt = [ int(w) for w in input().strip().split(" ")]
print(knapsack(val, wt, N, W)) | 3.578125 | 4 |
adventure_game.py | boslovski/adventure | 0 | 12795770 | import time
import random
def print_pause(message_to_print):
print(message_to_print)
time.sleep(2)
def intro(item, option):
print_pause("You find yourself standing in an open field, filled "
"with grass and yellow wildflowers.\n")
print_pause("Rumor has it that a " + option + " is somewhere around "
"here, and has been terrifying the nearby village.\n")
print_pause("In front of you is a house.\n")
print_pause("To your right is a dark cave.\n")
print_pause("In your hand you hold your trusty (but not very "
"effective) dagger.\n")
def cave(item, option):
if "sword" in item:
print_pause("\nYou peer cautiously into the cave.")
print_pause("\nYou've been here before, and gotten all"
" the good stuff. It's just an empty cave"
" now.")
print_pause("\nYou walk back to the field.\n")
else:
print_pause("\nYou peer cautiously into the cave.")
print_pause("\nIt turns out to be only a very small cave.")
print_pause("\nYour eye catches a glint of metal behind a "
"rock.")
print_pause("\nYou have found the magical Sword of Ogoroth!")
print_pause("\nYou discard your silly old dagger and take "
"the sword with you.")
print_pause("\nYou walk back out to the field.\n")
item.append("sword")
field(item, option)
def house(item, option):
print_pause("\nYou approach the door of the house.")
print_pause("\nYou are about to knock when the door "
"opens and out steps a " + option + ".")
print_pause("\nEep! This is the " + option + "'s house!")
print_pause("\nThe " + option + " attacks you!\n")
if "sword" not in item:
print_pause("You feel a bit under-prepared for this, "
"what with only having a tiny dagger.\n")
while True:
choice2 = input("Would you like to (1) fight or (2) "
"run away?")
if choice2 == "1":
if "sward" in item:
print_pause("\nAs the " + option + " moves to attack, "
"you unsheath your new sword.")
print_pause("\nThe Sword of Ogoroth shines brightly in "
"your hand as you brace yourself for the "
"attack.")
print_pause("\nBut the " + option + "takes one look at "
"your shiny new toy and runs away!")
print_pause("\nYou have rid the town of the " + option +
". You are victorious!\n")
else:
print_pause("\nYou do your best...")
print_pause("but your dagger is no match for the "
+ option + ".")
print_pause("\nYou have been defeated!\n")
play_again()
break
if choice2 == "2":
print_pause("\nYou run back into the field. "
"\nLuckily, you don't seem to have been "
"followed.\n")
field(item, option)
break
def field(item, option):
print_pause("Enter 1 to knock on the door of the house.")
print_pause("Enter 2 to peer into the cave.")
print_pause("What would you like to do?")
while True:
choice1 = input("(Please enter 1 or 2.)\n")
if choice1 == "1":
house(item, option)
break
elif choice1 == "2":
cave(item, option)
break
def play_again():
again = input("Would you like to play again? (y/n)").lower()
if again == "y":
print_pause("\n\n\nExcellent! Restarting the game ...\n\n\n")
play_game()
elif again == "n":
print_pause("\n\n\nThanks for playing! See you next time.\n\n\n")
else:
play_again()
def play_game():
item = []
option = random.choice(["pirate", "fairy", "dragon", "gorgon",
"troll"])
intro(item, option)
field(item, option)
play_game()
| 3.90625 | 4 |
efundsapi/views/__init__.py | code-scaffold/django | 1 | 12795771 | <gh_stars>1-10
from .demo import (DemoViewSet,)
| 1.03125 | 1 |
measure_mate/tests/models/test_rating.py | niche-tester/measure-mate | 15 | 12795772 | from builtins import str
from django.test import TestCase
from measure_mate.tests.factories import TemplateFactory, AttributeFactory, RatingFactory
class RatingTestCases(TestCase):
def test_creation_of_rating(self):
template = TemplateFactory()
attribute = AttributeFactory(template=template)
rating = RatingFactory(attribute=attribute, rank=1)
rating.clean()
self.assertEqual("%s - %s - %s" % (template.name, attribute.name, rating.name), str(rating))
| 2.71875 | 3 |
watershed.py | by256/icsg3d | 27 | 12795773 | <filename>watershed.py
"""
## Functions for computing watershed segmentation
--------------------------------------------------
## Author: <NAME>.
## Email: <EMAIL>
## Version: 1.0.0
--------------------------------------------------
## License: MIT
## Copyright: Copyright <NAME> & <NAME> 2020, ICSG3D
-------------------------------------------------
"""
from itertools import product
import matplotlib.pyplot as plt
import numpy as np
from scipy.ndimage.morphology import distance_transform_edt
from skimage import filters, measure, morphology, segmentation
from viz import plot_points_3d
def get_background(S, kernel_size=1):
kernel = morphology.ball(kernel_size)
return morphology.dilation(S, kernel)
def get_foreground(S, kernel_size=1, erode=True):
if not erode:
return S
else:
kernel = morphology.ball(kernel_size)
return morphology.erosion(S, kernel)
def crop(a, bbox):
return a[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]]
def segment_nuclei(
binary,
species,
intensity,
wmin=8,
it=1,
max_iters=5,
min_convexity=0.8,
verbose=False,
):
""" Computes segmented form of species matrix using recursive watershed segmentation """
# Matrix for storing result
R = np.zeros(binary.shape)
binary = binary.astype(int)
# 1. Label the connected components
labels = measure.label(binary, connectivity=1)
seg_classes, seg_counts = np.unique(labels, return_counts=True)
seg_classes = np.array(
[seg_classes[i] for i in range(len(seg_classes)) if seg_counts[i] > 3]
)
seg_classes = seg_classes[seg_classes != 0]
if verbose:
print("\nIteration", it)
print("Classes", seg_classes)
print("Counts", seg_counts)
plot_points_3d(labels)
for cl in seg_classes:
if verbose:
print("Class", cl)
# Crop the images
binary_cl = np.where(labels == cl, labels, 0)
intensity_cl = np.where(labels == cl, intensity, 0)
species_cl = np.where(labels == cl, species, 0)
region = measure.regionprops(binary_cl, intensity_cl)
bbox = region[0].bbox
binary_bbox = crop(binary_cl, bbox)
intensity_bbox = crop(intensity_cl, bbox)
species_bbox = crop(species_cl, bbox)
chull = morphology.convex_hull_image(binary_bbox)
convexity = np.count_nonzero(binary_bbox) / np.count_nonzero(chull)
if verbose:
print("Convexity:", convexity)
if convexity >= min_convexity:
max_class = np.max(R)
R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]] = np.where(
binary_bbox == cl,
max_class + 1,
R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]],
)
continue
# Get the foreground, bg etc.
        # Determine whether or not to erode
fg = get_foreground(binary_bbox)
bg = get_background(binary_bbox)
unknown = bg - fg
if verbose:
print("Segmenting")
plot_points_3d(fg)
# Markers for ws
markers = measure.label(fg)
markers += 1
markers[unknown == 1] = 0
# WS
wss = segmentation.watershed(binary_bbox, markers)
wss[wss == 1] = 0
max_class = np.max(R)
wss = wss + max_class # sub region with classes relabelled
wss[wss == max_class] = 0
nclasses = len(np.unique(wss)) - 1
if verbose:
print("WS", it, np.unique(wss, return_counts=True))
plot_points_3d(wss)
print(int(np.count_nonzero(wss) / wmin), nclasses)
        # Determine whether or not to segment again on the basis of convexity and object counts
if (
int(np.count_nonzero(wss) / wmin) > len(np.unique(wss)) - 1
and it < max_iters
):
if verbose:
print("Segmenting again")
Rp = segment_nuclei(
wss,
species_bbox,
intensity_bbox,
it=it + 1,
verbose=verbose,
max_iters=max_iters,
min_convexity=min_convexity,
)
max_class = np.max(R)
Rp = Rp + max_class # sub region with classes relabelled
Rp[Rp == max_class] = 0
R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]] = np.where(
Rp != 0, Rp, R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]]
)
else:
R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]] = np.where(
wss != 0,
wss,
R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]],
)
if verbose:
print(it, np.unique(R, return_counts=True))
return R
def majority_vote(seg_img, R, cl):
""" Majority vote of class cl in a region R in segmented image"""
binary_label_map = np.where(R == cl, seg_img, 0).astype(int)
if np.count_nonzero(binary_label_map) == 0:
return 0
unique, counts = np.unique(binary_label_map, return_counts=True)
unique_counts = sorted(list(zip(unique, counts)), key=lambda x: x[1])
unique_counts = [i for i in unique_counts if i[0] != 0]
specie = unique_counts[-1][0]
return specie
def centroids(seg_img, R):
""" Determine centroid of a region R in segmented image """
classes = np.unique(R)[1:]
atoms = []
means = []
xc = np.linspace(0, R.shape[0], R.shape[0] + 1)[:-1]
yc = np.linspace(0, R.shape[0], R.shape[0] + 1)[:-1]
zc = np.linspace(0, R.shape[0], R.shape[0] + 1)[:-1]
coords = np.array(list(product(xc, yc, zc))).reshape(32, 32, 32, 3)
seg_img_coords = np.concatenate([seg_img.reshape(32, 32, 32, 1), coords], axis=-1)
for cl in classes:
cmask = R == cl
smask = seg_img_coords[cmask]
specie = majority_vote(seg_img, R, cl)
if specie != 0:
means.append(np.mean(smask[:, 1:], axis=0))
atoms.append(specie)
return atoms, means
def watershed_clustering(M, S, Sb, max_iters=5, return_ws=False, verbose=False):
"""Determine centroids and species of atoms in the density/species matrices
Returns the atom z numbers and means in voxel coordinates"""
M = M.squeeze()
S = S.squeeze()
Sb = Sb.squeeze()
R = segment_nuclei(Sb, S, M, max_iters=max_iters, verbose=verbose)
atoms, means = centroids(S, R)
if return_ws:
return np.array(atoms), np.array(means), R
else:
return np.array(atoms), np.array(means)
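if __name__ == "__main__":
    # Minimal illustrative run (added example, not part of the original module): a single
    # cubic blob of "species 14" in a 32^3 grid should yield one atom with a centroid near
    # the middle of the blob. The demo_* array names below are hypothetical demo inputs.
    demo_M = np.zeros((32, 32, 32))
    demo_S = np.zeros((32, 32, 32))
    demo_M[14:18, 14:18, 14:18] = 1.0
    demo_S[14:18, 14:18, 14:18] = 14
    demo_Sb = (demo_S > 0).astype(int)
    demo_atoms, demo_means = watershed_clustering(demo_M, demo_S, demo_Sb, verbose=False)
    print(demo_atoms, demo_means)  # expected: [14] and a centroid of roughly [15.5, 15.5, 15.5]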
| 2.703125 | 3 |
hypemaths/exceptions/exceptions.py | janaSunrise/HypeMaths | 8 | 12795774 | class InvalidMatrixError(Exception):
pass
class MatrixDimensionError(Exception):
pass
class MatrixNotSquare(Exception):
pass
class InvalidVectorError(Exception):
pass
class VectorDimensionError(Exception):
pass
| 1.890625 | 2 |
stanCode_Projects/my_photoshop/mirror_lake.py | EricCheng8679/sc-projects | 0 | 12795775 | """
File: mirror_lake.py
----------------------------------
This file reads in mt-rainier.jpg and
makes a new image that creates a mirror
lake vibe by placing an inverse image of
mt-rainier.jpg below the original one.
"""
from simpleimage import SimpleImage
def reflect(filename):
"""
:param filename: str, the file directory of the original image
:return: flip-vertical image
"""
img = SimpleImage(filename)
blank_img = SimpleImage.blank(img.width, img.height * 2) # generating a blank image of double height
for x in range(img.width):
for y in range(img.height):
every_color_of_pixel = img.get_pixel(x, y)
upper_blank = blank_img.get_pixel(x, y) # upper part of blank image
lower_blank = blank_img.get_pixel(x, blank_img.height - 1 - y) # lower part of blank_image
upper_blank.red = every_color_of_pixel.red
upper_blank.green = every_color_of_pixel.green
upper_blank.blue = every_color_of_pixel.blue
lower_blank.red = every_color_of_pixel.red
lower_blank.green = every_color_of_pixel.green
lower_blank.blue = every_color_of_pixel.blue
return blank_img
def main():
"""
This program generates a flip-vertical image.
"""
original_mt = SimpleImage('images/mt-rainier.jpg')
original_mt.show()
reflected = reflect('images/mt-rainier.jpg')
reflected.show()
if __name__ == '__main__':
main()
| 4.09375 | 4 |
src/test/python/make_test_data.py | svanbodegraven/VariantSpark | 6 | 12795776 | import os
import pandas as pd
'''
Generate files for decision tree integration test
'''
BASEDIR = os.path.abspath(os.path.join(os.path.basename(__file__), '../../../..'))
def proj_path(path):
return os.path.join(BASEDIR, path)
data = pd.read_csv(proj_path('data/CNAE-9.csv'), names=['category'] + ["w_%s" % i for i in range(0, 856)])
y = data.category
X = data[["w_%s" % i for i in range(0, 856)]]
# Save output data
X_df = X.transpose()
X_df.to_csv(proj_path('data/CNAE-9-wide.csv'))
y_df = pd.DataFrame(y)
y_df.to_csv(proj_path('data/CNAE-9-labels.csv'))
from sklearn import tree
clf = tree.DecisionTreeClassifier(random_state=6)
clf.fit(X, y)
pred = clf.predict(X)
print(clf)
print("Impurity len: %s" % len(clf.tree_.impurity))
# Save the data for test verification
var_df = pd.DataFrame(dict(importance=clf.feature_importances_), index=X.columns)
pred_df = pd.DataFrame(dict(predicted=pred))
tree_df = pd.DataFrame(dict(impurity=clf.tree_.impurity, feature=clf.tree_.feature, threshold=clf.tree_.threshold))
var_df.to_csv(proj_path('src/test/data/CNAE-9-importance.csv'))
pred_df.to_csv(proj_path('src/test/data/CNAE-9-predicted.csv'))
tree_df.to_csv(proj_path('src/test/data/CNAE-9-tree.csv'))
| 2.875 | 3 |
test/demographicsChart.py | gioandreou/thesis-old | 0 | 12795777 | <gh_stars>0
from openpyxl import load_workbook
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as dates
def func(pct, allvals):
absolute = int(pct/100.*np.sum(allvals))
return "{:.1f}%\n({:d} )".format(pct, absolute)
labels_Ages=['13-17','18-24','25-34','35-44','45-54','55-64','65+']
def plot_ages_content(dataframe):
#print(dataframe)
# iloc[rows,cols]
date_content=dataframe.iloc[:,0:1]
women_age = dataframe.iloc[:,1:8].mean()
men_age=dataframe.iloc[:,8:15].mean()
fig, ax = plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect="equal"))
women_age.plot.pie(labels=['F.13-17','F.18-24','F.25-34','F.35-44','F.45-54','F.55-64','F.65+'],
fontsize=8,subplots=True, autopct=lambda pct:func(pct,women_age.values),
textprops=dict(color="w"))
ax.set_title("Average Women Age Chart for Content Activity"+date_content.iat[0,0].split("T",1)[0]+" until "+date_content.iat[-1,0].split("T",1)[0],fontsize=15)
ax.set_ylabel('Chart')
ax.set_xlabel('Average Percentage and (number of people)')
ax.legend(labels=labels_Ages,
title="Age Groups",
loc="center left",
bbox_to_anchor=(1, 0, 0.5, 1))
plt.savefig("charts/Women-Age-Content"+".png",dpi=300)
print("Women : Age-Content chart was created!")
plt.clf()
fig, ax = plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect="equal"))
men_age.plot.pie(labels=['M.13-17','M.18-24','M.25-34','M.35-44','M.45-54','M.55-64','M.65+'],
fontsize=8,subplots=True, autopct=lambda pct:func(pct,men_age.values),
textprops=dict(color="w"),pctdistance=0.7)
ax.set_title("Average Men Age Chart for Content Activity"+date_content.iat[0,0].split("T",1)[0]+" until "+date_content.iat[-1,0].split("T",1)[0],fontsize=15)
ax.set_ylabel('Chart')
ax.set_xlabel('Average Percentage and (number of people)')
ax.legend(labels=labels_Ages,
title="Age Groups",
loc="center left",
bbox_to_anchor=(1, 0, 0.5, 1))
plt.savefig("charts/Men-Age-Content"+".png",dpi=300)
print("Men : Age-Content chart was created!")
plt.clf()
def plot_ages_impressions(dataframe):
women_age = dataframe.iloc[:,1:8].mean()
men_age=dataframe.iloc[:,8:15].mean()
date_impression = dataframe.iloc[:,0:1]
fig, ax = plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect="equal"))
women_age.plot.pie(labels=['F.13-17','F.18-24','F.25-34','F.35-44','F.45-54','F.55-64','F.65+'],
fontsize=8,subplots=True, autopct=lambda pct:func(pct,women_age.values),
textprops=dict(color="w"),pctdistance=0.7)
ax.set_title("Average Women Age Chart for Impression"+date_impression.iat[0,0].split("T",1)[0]+" until "+date_impression.iat[-1,0].split("T",1)[0],fontsize=15)
ax.set_ylabel('Chart')
ax.set_xlabel('Average Percentage and (number of people)')
ax.legend(labels=labels_Ages,
title="Age Groups",
loc="center left",
bbox_to_anchor=(1, 0, 0.5, 1))
plt.savefig("charts/Women-Age-Impression"+".png",dpi=300)
print("Women : Age-Impression chart was created!")
plt.clf()
fig, ax = plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect="equal"))
men_age.plot.pie(labels=['M.13-17','M.18-24','M.25-34','M.35-44','M.45-54','M.55-64','M.65+'],
fontsize=8,subplots=True, autopct=lambda pct:func(pct,men_age.values),
textprops=dict(color="w"),pctdistance=0.7)
ax.set_title("Average Men Age Chart for Impressions"+date_impression.iat[0,0].split("T",1)[0]+" until "+date_impression.iat[-1,0].split("T",1)[0],fontsize=15)
ax.set_ylabel('Chart')
ax.set_xlabel('Average Percentage and (number of people)')
ax.legend(labels=labels_Ages,
title="Age Groups",
loc="center left",
bbox_to_anchor=(1, 0, 0.5, 1))
plt.savefig("charts/Men-Age-Impression"+".png",dpi=300)
print("Men : Age-Impression chart was created!")
plt.clf()
def plot_city(dataframe1,dataframe2):
#print(dataframe1)
#print(dataframe2)
date_content=dataframe1.iloc[:,0:1]
date_impression = dataframe1.iloc[:,0:1]
#print(date_content.iat[-1,0])
sub_content= dataframe1.iloc[:,1:]
sub_impression = dataframe2.iloc[:,1:]
top10_content = sub_content.mean().nlargest(10)
top10_impression = sub_impression.mean().nlargest(10)
objects_content= top10_content.axes
fig, ax = plt.subplots(figsize=(12, 12))
top10_content.plot(kind='bar',use_index=True,position=0.8, grid=True,fontsize=8,rot=6,)
plt.ylabel('People that are talking about the Page',fontsize=15)
plt.title('Average content activity. Days: '+date_content.iat[0,0].split("T",1)[0]+" until "+date_content.iat[-1,0].split("T",1)[0],fontsize=15)
plt.savefig("charts/City-Content"+".png",dpi=300)
print("City-Content Activity chart was created!")
plt.clf()
fig, ax = plt.subplots(figsize=(12, 12))
top10_impression.plot(kind='bar',use_index=True, grid=True,fontsize=8,rot=10,)
    plt.ylabel('People on whose screens the Page appeared',fontsize=15)
    plt.title('Average Impressions. Days: '+date_impression.iat[0,0].split("T",1)[0]+" until "+date_impression.iat[-1,0].split("T",1)[0],fontsize=15)
plt.savefig("charts/City-Impressions"+".png",dpi=300)
print("City-Impression chart was created!")
plt.clf()
#print(top10_content.values)
def plot_country(dataframe):
date_impression = dataframe.iloc[:,0:1]
sub_impression = dataframe.iloc[:,1:]
top10_impression = sub_impression.mean().nlargest(10)
fig, ax = plt.subplots(figsize=(12, 12))
top10_impression.plot(kind='bar',use_index=True,position=0.8, grid=True,fontsize=8,rot=6,)
    plt.ylabel('People\'s countries in which the Page appeared on their screens',fontsize=15)
    plt.title('Average impressions. Days: '+date_impression.iat[0,0].split("T",1)[0]+" until "+date_impression.iat[-1,0].split("T",1)[0],fontsize=15)
plt.savefig("charts/Country-Impression"+".png",dpi=300)
print("Country-Impression chart was created!")
plt.clf()
def run_charts():
xlsxfile_age_content ='excels/Ages-Content.xlsx'
age_content = pd.read_excel(xlsxfile_age_content)
plot_ages_content(age_content)
xlsxfile_age_impression = 'excels/Ages-Impressions.xlsx'
age_impression = pd.read_excel(xlsxfile_age_impression)
plot_ages_impressions(age_impression)
xlsxfile_city_content = 'excels/City-Content.xlsx'
city_content = pd.read_excel(xlsxfile_city_content)
xlsxfile_city_impression = 'excels/City-Impression.xlsx'
city_impression = pd.read_excel(xlsxfile_city_impression)
plot_city(city_content,city_impression)
xlsxfile_country_impression = 'excels/Country-Impression.xlsx'
country_impression= pd.read_excel(xlsxfile_country_impression)
plot_country(country_impression)
run_charts() | 2.890625 | 3 |
src/nitpick/style/__init__.py | jaysonsantos/nitpick | 0 | 12795778 | <gh_stars>0
"""Styles parsing and merging."""
from .cache import parse_cache_option
from .core import Style
__all__ = ("Style", "parse_cache_option")
| 1.242188 | 1 |
experiments/tests/test_subsets_exp.py | snspam/sn_spam | 0 | 12795779 | """
Tests the subsets_exp module.
"""
import mock
import unittest
from .context import subsets_exp
from .context import config
from .context import runner
from .context import test_utils as tu
class Subsets_ExperimentTestCase(unittest.TestCase):
def setUp(self):
config_obj = tu.sample_config()
mock_runner_obj = mock.Mock(runner.Runner)
self.test_obj = subsets_exp.Subsets_Experiment(config_obj,
mock_runner_obj)
def tearDown(self):
self.test_obj = None
def test_init(self):
# setup
test_obj = self.test_obj
# assert
self.assertTrue(isinstance(test_obj.config_obj, config.Config))
self.assertTrue(isinstance(test_obj.runner_obj, runner.Runner))
self.assertTrue(test_obj.config_obj.modified)
self.assertTrue(test_obj.config_obj.pseudo)
def test_divide_data_into_subsets(self):
self.test_obj.config_obj.end = 4000
self.test_obj.config_obj.start = 0
self.test_obj.config_obj.fold = '0'
result = self.test_obj.divide_data_into_subsets(num_subsets=4)
exp = [(0, 1000, '0'), (1000, 2000, '1'), (2000, 3000, '2'),
(3000, 4000, '3')]
self.assertTrue(len(result) == 4)
self.assertTrue(result == exp)
def test_run_experiment(self):
subsets = [(1, 2, '4'), (7, 77, '88'), (7, 88, '169')]
self.test_obj.single_run = mock.Mock()
self.test_obj.change_config_parameters = mock.Mock()
self.test_obj.run_experiment(subsets)
exp_ccp = [mock.call(1, 2, '4'), mock.call(7, 77, '88'),
mock.call(7, 88, '169')]
self.assertTrue(self.test_obj.single_run.call_count == 3)
self.assertTrue(self.test_obj.change_config_parameters.call_args_list
== exp_ccp)
def test_single_run(self):
self.test_obj.runner_obj.run_independent = mock.Mock()
self.test_obj.runner_obj.run_independent.return_value = ('v', 't')
self.test_obj.change_config_rel_op = mock.Mock()
self.test_obj.runner_obj.run_relational = mock.Mock()
self.test_obj.runner_obj.run_evaluation = mock.Mock()
self.test_obj.single_run()
exp_ccro = [mock.call(train=True), mock.call(train=False)]
exp_rel = [mock.call('v', 't'), mock.call('v', 't')]
self.test_obj.runner_obj.run_independent.assert_called_with()
self.assertTrue(self.test_obj.change_config_rel_op.call_args_list ==
exp_ccro)
self.assertTrue(self.test_obj.runner_obj.run_relational.call_args_list
== exp_rel)
self.test_obj.runner_obj.run_evaluation.assert_called_with('t')
def test_change_config_parameters(self):
self.test_obj.change_config_parameters(2, 4, '69')
self.assertTrue(self.test_obj.config_obj.start == 2)
self.assertTrue(self.test_obj.config_obj.end == 4)
self.assertTrue(self.test_obj.config_obj.fold == '69')
def test_change_config_rel_op(self):
self.test_obj.change_config_rel_op(train=False)
self.assertTrue(self.test_obj.config_obj.infer)
def test_suite():
suite = unittest.TestLoader().loadTestsFromTestCase(
Subsets_ExperimentTestCase)
return suite
if __name__ == '__main__':
unittest.main()
| 2.734375 | 3 |
order.py | usjeong/coining-monitor | 0 | 12795780 | <reponame>usjeong/coining-monitor<filename>order.py
import requests
import sys
import time
from datetime import datetime
def watch_price(max_price=0, min_price=0):
resp = requests.get("https://api.coinone.co.kr/trades/?currency=eth")
result = resp.json()
order = result["completeOrders"][-1]
price = int(order["price"])
date_now = datetime.fromtimestamp(int(order["timestamp"]))
print("max_limit: %d\nmin_limit: %d\n" % (max_price, min_price))
print("time: %s\nprice: %d\n" % (date_now, price))
if max_price == 0 and min_price == 0:
return
if price >= max_price:
for _ in range(3):
time.sleep(0.2)
print("warn!!! max price: %d\a\n" % price)
elif price <= min_price:
for _ in range(5):
time.sleep(0.2)
print("warn!!! min price: %d\a\n" % price)
if __name__ == "__main__":
max_price = int(sys.argv[1])
min_price = int(sys.argv[2])
while True:
time.sleep(5)
watch_price(max_price, min_price)
| 3.15625 | 3 |
main.py | ashduino101/python-terminal-video-player | 0 | 12795781 | <gh_stars>0
import os
import sys
import time
import moviepy.editor
import pygame
from blessed import Terminal
from PIL import Image, ImageOps
import cv2
term = Terminal()
HALF = '\N{LOWER HALF BLOCK}'
def image(im):
im = ImageOps.fit(im, (term.width, term.height * 2))
pixels = im.load()
res = ''
for y in range(im.size[1] // 2):
for x in range(im.size[0]):
# false positives, pycharm doesn't like this for some reason
# noinspection PyUnresolvedReferences
r, g, b = pixels[x, y * 2]
# noinspection PyUnresolvedReferences
r2, g2, b2 = pixels[x, y * 2 + 1]
res += term.on_color_rgb(r, g, b) + term.color_rgb(r2, g2, b2) + HALF
return res
def video(path):
with term.cbreak(), term.hidden_cursor(), term.fullscreen():
# get start time
start = time.time()
# variables
frame_count = 1
dropped_frames = 0
# load video
capture = cv2.VideoCapture(path)
# get fps
fps = capture.get(cv2.CAP_PROP_FPS)
# load audio from video
v = moviepy.editor.VideoFileClip(path)
audio = v.audio
audio.write_audiofile(path.split(".")[0] + ".wav")
# play audio
pygame.mixer.init()
pygame.mixer.music.load(path.split(".")[0] + ".wav")
pause = False
first = True
# main loop
while capture.isOpened():
# for pause/exit
inp = term.inkey(timeout=0.01)
# esc
if inp == "\x1b" or inp == "q":
break
if inp == ' ':
pause = not pause
pygame.mixer.music.pause() if pause else pygame.mixer.music.unpause()
print(term.home + term.move_y((term.height - 1) // 2))
print(
term.black_on_white(
term.center(
'Paused. Press %s to unpause, or %s or %s to exit.' % (
term.italic(term.bold("Space")) + term.normal,
term.italic(term.bold("Escape")) + term.normal,
term.italic(term.bold("Q")) + term.normal
)
)
)
)
if not pause:
if first:
pygame.mixer.music.play()
first = False
ret, frame = capture.read()
elapsed = time.time() - start
expected_frame = int(elapsed * fps)
if frame_count < expected_frame:
frame_count += 1
dropped_frames += 1
continue
if not ret:
break
frame_count += 1
img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
im = Image.fromarray(img)
sys.stdout.write(term.home + image(im))
sys.stdout.write(
term.white_on_black +
"Elapsed time: {} | "
"Actual frame: {} | "
"Theoretical frame: {} | "
"Dropped frames: {} | "
"FPS: {}".format(
elapsed, frame_count - dropped_frames,
expected_frame, dropped_frames,
(frame_count - dropped_frames) / elapsed
)
)
sys.stdout.flush()
capture.release()
cv2.destroyAllWindows()
pygame.mixer.music.stop()
video(sys.argv[1])
| 2.6875 | 3 |
us_counties_death_per_cases.py | RealHulubulu/Coronavirus_Data | 0 | 12795782 | <filename>us_counties_death_per_cases.py
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 29 15:54:28 2020
https://plotly.com/python/county-choropleth/?fbclid=IwAR1xOTSniBA_d1okZ-xEOa8eEeapK8AFTgWILshAnEvfLgJQPAhHgsVCIBE
https://www.kaggle.com/fireballbyedimyrnmom/us-counties-covid-19-dataset
better census data
https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/
"""
from urllib.request import urlopen
import json
import pandas as pd
import plotly.express as px
import plotly
from plotly.offline import plot
import os
import math
if not os.path.exists("images_counties"):
os.mkdir("images_counties")
with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response:
fips_states = json.load(response)
fips_states_keys = list(fips_states.keys())
fips_states_values = list(fips_states.values())
fips_states_keys = [w.replace('ogia', 'orgia') for w in fips_states_keys]
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
dfmain = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv',
dtype={"fips": str})
# print(dfmain.head)
# print(dfmain.shape)
# print(dfmain["date"][dfmain.shape[0] - 1])
# print(dfmain["date"][1])
# current_date = dfmain["date"][dfmain.shape[0] - 1] # 6/29/2020, or yesterday
# current_date = df["date"][10] # 6/29/2020
#%%
def line_prepender(filename, line):
with open(filename, 'r+') as f:
content = f.read()
f.seek(0, 0)
f.write(line.rstrip('\r\n') + '\n' + content)
f.write('\n{% endblock %}')
#%%
def load_data(when = 0, yesterday=True):
df = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv',
dtype={"fips": str})
current_date = ''
if yesterday:
current_date = df["date"][df.shape[0] - 1] # 6/29/2020
else:
current_date = df["date"][when]
return df, current_date
def make_df_for_date(input_date, df):
specific_date_df = pd.DataFrame(data=None, columns=list(df.columns.values))
for index, row in df.iterrows():
if row["date"] == input_date:
specific_date_df.loc[df.index[index]] = df.iloc[index]
# print(specific_date_df) # has all data for current date
# 3067 x 6
# specific_date_df = specific_date_df.copy()
IFR_list = []
for index, row in specific_date_df.iterrows():
if row["cases"] > 0:
IFR = row["deaths"] / row["cases"]
IFR_list.append(IFR)
else:
IFR_list.append(0)
specific_date_df["IFR"] = IFR_list
# print(specific_date_df)
specific_date_df = specific_date_df.reset_index(drop=True)
# specific_date_Georgia_df = pd.DataFrame(data=None, columns=list(specific_date_df.columns.values))
# print(specific_date_df)
# print(specific_date_Georgia_df)
# # for picking out a specific state, in this case Georgia
# state = "Georgia"
# index_counter = 0
# for index, row in specific_date_df.iterrows():
# # print(index)
# if row["state"] == state:
# # print("yes")
# # print(index)
# # print(copy_new_df.index[index])
# specific_date_Georgia_df.loc[index_counter] = specific_date_df.iloc[index]
# index_counter += 1
# # print(index_counter)
# print(specific_date_Georgia_df) # has all data for current date
with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response:
fips_states = json.load(response)
# print(fips_states)
fips_states_keys = list(fips_states.keys())
fips_states_values = list(fips_states.values())
fips_states_keys = [w.replace('ogia', 'orgia') for w in fips_states_keys]
# these are in the data from kaggle but not in the geojson
specific_date_df = specific_date_df[specific_date_df["state"] != "Northern Mariana Islands"]
specific_date_df = specific_date_df[specific_date_df["state"] != "Virgin Islands"]
specific_date_df = specific_date_df[specific_date_df["state"] != "Puerto Rico"]
specific_date_df = specific_date_df[specific_date_df["state"] != "Guam"]
for state, state_id in zip(fips_states_keys, fips_states_values):
specific_date_df['state'] = specific_date_df['state'].replace(state, state_id)
# print(specific_date_df)
specific_date_df["state_name"] = specific_date_df["state"]
for state, state_id in zip(fips_states_keys, fips_states_values):
specific_date_df['state_name'] = specific_date_df['state_name'].replace(state_id, state)
county_and_state = []
for index, row in specific_date_df.iterrows():
c_and_s = row["county"] +", "+ row["state_name"]
county_and_state.append(c_and_s)
specific_date_df["county_and_state"] = county_and_state
return specific_date_df
#%%
def states_heat_map(specific_date_df):
"for showing data per state"
states_only_df = pd.DataFrame()
list_state_count = []
list_str_states = list(specific_date_df["state"].unique())
# print(list_str_states)
for id_ in list_str_states:
total = 0
for index, row in specific_date_df.iterrows():
if row["state"] == id_:
# print(id_)
total += row["cases"]
list_state_count.append(total)
# break
print(list_state_count)
print(len(list_state_count))
states_only_df["per_state_count"] = list_state_count
states_only_df["state_id"] = list_str_states
states_only_df["state_name"] = fips_states_keys
print(states_only_df)
my_file = os.path.join(THIS_FOLDER, 'population_states_2019.txt')
pop_states = pd.read_csv(my_file, header=0)
# print(pop_states["State"])
# print(states_only_df)
pop_list = []
for state in states_only_df["state_name"]:
for i,row in pop_states.iterrows():
if row["State"] == state:
pop_list.append(row["Population"])
states_only_df["state_pop"] = pop_list
# print(pop_list)
# print(len(pop_list))
per100k = []
for pop, count in zip(states_only_df["state_pop"], states_only_df["per_state_count"]):
per100k.append(100000 * (count/pop))
states_only_df["per100k"] = per100k
print(states_only_df)
with open('gz_2010_us_040_00_20m.json') as response:
states_mapping = json.load(response)
print(states_mapping["features"][0]["properties"]["STATE"])
print(len(states_mapping["features"])) #3221
# per state
fig = px.choropleth(states_only_df, geojson=states_mapping,
locations='state_id',
color='per100k',
color_continuous_scale="Viridis",
# range_color=(0, 10),
# locationmode = 'USA-states',
featureidkey = "properties.STATE",
hover_name = "state_name",
scope="usa",
labels={'per100k':'cases per 100k'}
)
fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
fig.show()
plot(fig)
return fig
#%%
def counties_heat_map(specific_date_df, date):
"for showing data per county"
my_file = os.path.join(THIS_FOLDER, 'all_census_data.csv')
pop_counties = pd.read_csv(open(my_file))
# print(pop_counties)
county_id = list(pop_counties["COUNTY"])
state_id = list(pop_counties["STATE"])
population_per_county = list(pop_counties["POPESTIMATE2019"])
fips_county_ids = []
for n, c_id in enumerate(county_id):
if c_id < 10:
county_id[n] = "00"+str(c_id)
elif c_id < 100:
county_id[n] = "0"+str(c_id)
else:
county_id[n] = str(c_id)
for n, s_id in enumerate(state_id):
if s_id < 10:
state_id[n] = "0"+str(s_id)
else:
state_id[n] = str(s_id)
# print(county_id[57])
# print(state_id[600])
for c,s in zip(county_id, state_id):
fips_county_ids.append(s + c)
# print(fips_county_ids[1])
# print(len(county_id))
# print(len(state_id))
# print(len(fips_county_ids))
specific_date_df["county"] = specific_date_df["county"].str.replace('.', '') # DistrictOfColumbia
spec_fips = list(specific_date_df["fips"])
odd_balls = [] # unknown county cases
population_counties_list = []
# counter = 0
for spec_fips in spec_fips:
boo = True
for fips_census in fips_county_ids:
if spec_fips == fips_census:
population_counties_list.append(population_per_county[fips_county_ids.index(fips_census)])
boo = False
# counter += 1
if boo == True:
population_counties_list.append(1)
odd_balls.append(spec_fips) # unknown county cases
# print(spec_fips)
# print(len(population_counties_list)) # 3065
# print(population_counties_list)
specific_date_df["county_population"] = population_counties_list
per100k = []
for pop, count in zip(specific_date_df["county_population"], specific_date_df["cases"]):
if pop == 1:
per100k.append(1)
else:
per100k.append(100000 * (count/pop))
specific_date_df["cases_per100k"] = per100k
# print(specific_date_df)
# print(per100k)
per10k = []
for pop, count in zip(specific_date_df["county_population"], specific_date_df["cases"]):
if pop == 1:
per10k.append(1)
else:
per10k.append(10000 * (count/pop))
specific_date_df["per10k"] = per10k
# print(specific_date_df)
# print(per10k)
# import math
log10_per10k = []
for item in per10k:
# print(item)
log10_per10k.append(math.log10(item))
specific_date_df["log10_per10k"] = log10_per10k
# import math
log10_per100k = []
for item in per100k:
# print(item)
log10_per100k.append(math.log10(item))
specific_date_df["cases_per_log10_per100k"] = log10_per100k
copy_df = specific_date_df.copy() # this is to remove data from census that is missing from covid
copy_df = copy_df[copy_df['cases_per_log10_per100k'] != 0]
# Per county geojson
with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:
counties = json.load(response)
# print(counties["features"][0]["properties"]["STATE"])
# print((counties["features"][0])) #3221
# per county
fig = px.choropleth(copy_df, geojson=counties,
locations='fips',
color='cases_per_log10_per100k',
# color_continuous_scale="icefire", # winner
# color_continuous_scale="Viridis",
# color_continuous_scale="hot",
# color_continuous_scale="ice",
# color_continuous_scale="thermal",
color_continuous_scale=[[0.0,'rgb(0,0,200)'],
[0.3, 'rgb(149,207,216)'],
[0.5, 'rgb(234,252,258)'],
[0.6, 'rgb(255,210,0)'],
[1.0, 'rgb(200,0,0)']],
range_color=(0, 5),
# locationmode = 'USA-states',
featureidkey = "id",
hover_name = "county_and_state",
hover_data = ["county_population", "cases", "cases_per100k", "cases_per_log10_per100k", "deaths", "IFR"],
scope="usa",
labels = {'cases_per_log10_per100k': 'log(cases/100k)'}
)
fig.update_layout(margin={"r":5,"t":20,"l":5,"b":5},
title_text = '<br><br>Covid-19 Total Cases Per 100k Population Per<br>County Using 2019 Census Estimations<br>'+date,
titlefont = {"size": 15, "color":"White"},
paper_bgcolor='#4E5D6C',
plot_bgcolor='#4E5D6C',
geo=dict(bgcolor= 'rgba(0,0,0,0)', lakecolor='#4E5D6C'),
font = {"size": 14, "color":"White"},
autosize = False,
width = 800,
height = 650
)
# fig.show()
# plot(fig,filename='covid_counties_'+date+'.html')
plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html')
# plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/'+date+'_counties.html')
# plot(fig)
return fig
#%%
def main():
#[282519 rows x 6 columns] dfmain.shape[0]
old_date = ''
for i in range(dfmain.shape[0]):
new_date = dfmain["date"][i]
if i%50 == 0 and new_date != old_date:
old_date = new_date
new_date = dfmain["date"][dfmain.shape[0] - 1] # if yesterday = True
# new_date = '2020-06-30'
print("Date: ", new_date)
# df, current_date = load_data(when = i, yesterday=False)
df, current_date = load_data(when = i, yesterday=True)
# current_date = new_date
specific_date_df = make_df_for_date(input_date = current_date, df = df)
fig = counties_heat_map(specific_date_df, new_date)
# states_heat_map(specific_date_df):
# fig.write_image("images_counties/"+new_date+"_county_per100k.png")
fig.write_image("C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/pages/static/current_counties.png")
html_header = """
{% extends 'base.html' %}
{% block content %}
<body style="background-color:black;color:white;">
"""
line_prepender('C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html', html_header)
break
#%%
if __name__ == "__main__":
main()
# #%%
# from PIL import Image, ImageDraw
# import PIL
# import os
# images = []
# directory = 'C:/Users/karas/.spyder-py3/coronavirus/images_counties'
# for filename in os.listdir(directory):
# # print("hi")
# f = Image.open('C:/Users/karas/.spyder-py3/coronavirus/images_counties/'+filename)
# # f = f.save(filename)
# images.append(f)
# print(len(images))
# images[0].save('covid_timeline_county_cases.gif',
# save_all=True, append_images=images[1:], optimize=False, duration=500, loop=0)
#%%
#Graveyard
# def counties_heat_map(specific_date_df, date):
# "for showing data per county"
# my_file = os.path.join(THIS_FOLDER, 'population_counties_2019.xlsx')
# pop_counties = pd.read_excel(open(my_file, 'rb'), index_col=None, sep='\t')
# # print(pop_counties)
# # print(pop_counties["Geographic Area"])
# # pop_counties["Geographic Area"] = pop_counties["Geographic Area"].map(lambda x: x.lstrip('. ,').rstrip('aAbBcC'))
# pop_counties["Geographic Area"] = pop_counties["Geographic Area"].str.replace('.', '')
# pop_counties["Geographic Area"] = pop_counties["Geographic Area"].str.replace(',', '')
# # pop_counties["Geographic Area"] = pop_counties["Geographic Area"].str.replace('District of Columbia District of Columbia', 'District of Columbia')
# pop_counties["Geographic Area"] = pop_counties["Geographic Area"].str.replace(' County', '')
# # pop_counties["Geographic Area"] = pop_counties["Geographic Area"].str.replace(' ', '')
# # print(pop_counties)
# # for value in pop_counties["Geographic Area"]:
# # if "District " in value:
# # print(value)
# # for item in pop_counties["Geographic Area"]:
# # if "Virginia" in item:
# # print(item)
# # print(pop_counties.shape)
# states_col_for_county_pop = []
# for index, row in pop_counties.iterrows():
# one_state = ''
# for state in fips_states_keys:
# if state in row["Geographic Area"]:
# if row["Geographic Area"].find(state) > 1:
# one_state = state
# # if one_state == "Distric of Columbia":
# # print("huzzah")
# if state == "District of Columbia":
# # print("aye")
# # print(one_state)
# one_state = "District of Columbia"
# if one_state in row["Geographic Area"]:
# states_col_for_county_pop.append(one_state)
# # print(len(states_col_for_county_pop))
# # print(states_col_for_county_pop)
# pop_counties["state"] = states_col_for_county_pop
# # print(pop_counties)
# counties_list = []
# for index, row in pop_counties.iterrows():
# for state in fips_states_keys:
# if state in row["Geographic Area"]:
# if row["Geographic Area"].find(state) > 1:
# counties = row["Geographic Area"].replace(state, '')
# if state == "District of Columbia":
# # print("trouble maker")
# # print(counties)
# counties = "District of Columbia"
# counties_list.append(counties)
# # for index, row in pop_counties.iterrows():
# # if row["state"] in row["Geographic Area"]:
# # # print("oh yeah")
# # row["Geographic Area"].replace(row["state"], '')
# # break
# # print(len(counties_list))
# # print((counties_list))
# pop_counties["Geographic Area"] = counties_list
# # print(pop_counties)
# # for index, row in pop_counties.iterrows():
# # if row["Geographic Area"] == "District of Columbia":
# # # print("sure") #yes
# # print(specific_date_df)
# for state, state_id in zip(fips_states_keys, fips_states_values):
# pop_counties["state"] = pop_counties["state"].replace(state, state_id)
# specific_date_df["county"] = specific_date_df["county"].str.replace('.', '') # DistrictOfColumbia
# pop_counties["Geographic Area"] = pop_counties["Geographic Area"].str.replace('Parish', '') # DistrictOfColumbia
# pop_counties["Geographic Area"] = pop_counties["Geographic Area"].str.replace(' ', '') # DistrictOfColumbia
# spec_county = list(specific_date_df["county"])
# spec_state = list(specific_date_df["state"])
# pop_county = list(pop_counties["Geographic Area"])
# pop_state = list(pop_counties["state"])
# population_per_county = list(pop_counties[2019])
# population_counties_list = []
# # counter = 0
# for s_county, s_state in zip(spec_county, spec_state):
# boo = True
# for p_county, p_state in zip(pop_county, pop_state):
# if s_county == p_county and s_state == p_state:
# population_counties_list.append(population_per_county[pop_county.index(p_county)])
# boo = False
# # counter += 1
# if boo == True:
# population_counties_list.append(1)
# # print(len(population_counties_list)) # 3065
# # print(population_counties_list)
# specific_date_df["county_population"] = population_counties_list
# # print(specific_date_df)
# per100k = []
# for pop, count in zip(specific_date_df["county_population"], specific_date_df["cases"]):
# if pop == 1:
# per100k.append(1)
# else:
# per100k.append(100000 * (count/pop))
# specific_date_df["per100k"] = per100k
# # print(specific_date_df)
# # print(per100k)
# per10k = []
# for pop, count in zip(specific_date_df["county_population"], specific_date_df["cases"]):
# if pop == 1:
# per10k.append(1)
# else:
# per10k.append(10000 * (count/pop))
# specific_date_df["per10k"] = per10k
# # print(specific_date_df)
# # print(per10k)
# # import math
# log10_per10k = []
# for item in per10k:
# # print(item)
# log10_per10k.append(math.log10(item))
# specific_date_df["log10_per10k"] = log10_per10k
# # import math
# log10_per100k = []
# for item in per100k:
# # print(item)
# log10_per100k.append(math.log10(item))
# specific_date_df["log10_per100k"] = log10_per100k
# copy_df = specific_date_df.copy() # this is to remove data from census that is missing from covid
# copy_df = copy_df[copy_df['log10_per100k'] != 0]
# # Per county geojson
# with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:
# counties = json.load(response)
# # print(counties["features"][0]["properties"]["STATE"])
# # print((counties["features"][0])) #3221
# # per county
# fig = px.choropleth(copy_df, geojson=counties,
# locations='fips',
# color='log10_per100k',
# # color_continuous_scale="Reds",
# color_continuous_scale="Viridis",
# range_color=(0, 5),
# # locationmode = 'USA-states',
# featureidkey = "id",
# hover_name = "county",
# scope="usa",
# )
# fig.update_layout(margin={"r":5,"t":5,"l":5,"b":5},
# title_text = '<br><br>Covid-19 Spread Per 100k Population Per County<br>Using 2019 Census Estimations<br>'+date
# )
# # fig.show()
# # plot(fig)
# return fig | 2.34375 | 2 |
source_code/report.py | zacharybeebe/amabilis | 0 | 12795783 | from timberscale import Timber
import csv
import math
##### REPORT DATA MODULE
class Report(object):
LOG_RANGE_LIST = [["40+ ft", range(41, 121)], ["31-40 ft", range(31, 41)], ["21-30 ft", range(21, 31)],
["11-20 ft", range(11, 21)], ["1-10 ft", range(1, 11)]]
def __init__(self, CSV, Stand_to_Examine, Plots, Pref_Log, Min_Log):
self.csv = CSV
self.stand = Stand_to_Examine.upper()
self.plots = Plots
self.plog = Pref_Log
self.mlog = Min_Log
self.species_list = []
self.summary_conditions = []
self.summary_logs = []
self.conditions_dict = {}
self.logs_dict = {}
self.report()
def report(self):
AVG_HDR, self.species_list = self.get_HDR_Species()
## MAIN READ AND TREE (TIMBER CLASS) INITIALIZATION
with open(self.csv, 'r') as tree_data:
tree_data_reader = csv.reader(tree_data)
next(tree_data_reader)
for line in tree_data_reader:
if line[0] == "":
break
elif str(line[0]).upper() != self.stand:
                    continue
else:
SPECIES = str(line[3]).upper()
DBH = float(line[4])
if line[5] == "":
HEIGHT = int(round(AVG_HDR * (DBH/12),0))
else:
HEIGHT = int(float(line[5]))
PLOT_FACTOR = float(line[6])
if DBH >= 6.0:
tree = Timber(SPECIES, DBH, HEIGHT)
merch_dib = tree.merch_dib()
if merch_dib < 5:
merch_dib = 5
single = tree.tree_single(merch_dib, self.plog, self.mlog)
tree_per_acre = tree.tree_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR)
log_per_acre = tree.log_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR)
self.summary_conditions.append([single['SPP'][0],
[tree_per_acre['TPA'], tree_per_acre['BA_AC'], tree_per_acre['RD_AC'],
single['T_HGT'], single['HDR'], single['VBAR'], tree_per_acre['BF_AC'],
tree_per_acre['CF_AC']]])
self.summary_logs.append(self.get_log_list(single['SPP'][0], log_per_acre))
else:
tree = Timber(SPECIES, DBH, HEIGHT)
self.summary_conditions.append([tree.SPP,
[tree.get_TPA(PLOT_FACTOR), tree.get_BA_acre(PLOT_FACTOR),
tree.get_RD_acre(PLOT_FACTOR), tree.HGT, tree.HDR, 0, 0, 0]])
## SUMMARY STATISTICS
self.conditions_dict = self.get_conditions_dict()
self.logs_dict = self.get_logs_dict()
return
def get_HDR_Species(self):
HDR_LIST = []
SPECIES_LIST = []
with open(self.csv, 'r') as tree_data:
tree_data_reader = csv.reader(tree_data)
next(tree_data_reader)
for line in tree_data_reader:
if line[0] == "":
break
elif str(line[0]).upper() != self.stand:
                    continue
else:
SPP = str(line[3]).upper()
if SPP not in SPECIES_LIST:
SPECIES_LIST.append(SPP)
if line[5] != "":
DBH = float(line[4])
HEIGHT = float(line[5])
HDR_LIST.append(HEIGHT / (DBH / 12))
AVG_HDR = round(sum(HDR_LIST) / len(HDR_LIST), 2)
return AVG_HDR, SPECIES_LIST
def get_log_list(self, Species, Log_Dict):
master = [Species]
for key in Log_Dict:
rng = ""
for ranges in self.LOG_RANGE_LIST:
if Log_Dict[key]['L_LGT'] in ranges[1]:
rng = ranges[0]
temp_list = [Log_Dict[key]['L_GRD'][0], rng, Log_Dict[key]['L_CT_AC'],
Log_Dict[key]['L_BF_AC'], Log_Dict[key]['L_CF_AC']]
master.append(temp_list)
return master
def get_conditions_dict(self):
        # ORDER OF INITIAL SPP LIST - [0SPPCOUNT, 1TPA, 2BA_AC, 3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC]
        # After popping SPPCOUNT and inserting QMD at index 2:
# ORDER OF FINAL SPP LIST - [0TPA, 1BA_AC, 2QMD, 3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC]
master = {}
totals_temp = [0, 0, 0, 0, 0, 0, 0, 0, 0]
for spp in self.species_list:
master[spp] = [0, 0, 0, 0, 0, 0, 0, 0, 0]
for data in self.summary_conditions:
spp = data[0]
master[spp][0] += 1
totals_temp[0] += 1
for i in range(1, len(data[1]) + 1):
master[spp][i] += data[1][i - 1]
totals_temp[i] += data[1][i - 1]
master["TOTALS"] = totals_temp
for key in master:
sums = [1, 2, 3, 7, 8]
for i in range(1, len(master[key])):
if i in sums:
master[key][i] = master[key][i] / self.plots
else:
master[key][i] = master[key][i] / master[key][0]
master[key].pop(0)
master[key].insert(2, math.sqrt((master[key][1] / master[key][0]) / .005454))
return master
def get_logs_dict(self):
log_rng = ["40+ ft", "31-40 ft", "21-30 ft", "11-20 ft", "1-10 ft", 'TGRD']
master = {}
# Formatting Species into main keys
for spp in self.species_list:
master[spp] = {}
master['TOTALS'] = {}
# Formatting Grades and Ranges in correct order, as nested dicts of Species and Totals
for key in master:
for grade in Timber.GRADE_NAMES:
master[key][grade] = {}
for rng in log_rng:
master[key][grade][rng] = [0, 0, 0]
master[key]['TTL'] = {}
for rng in log_rng:
master[key]['TTL'][rng] = [0, 0, 0]
# Adding data to Master Dict
for data in self.summary_logs:
spp = data[0]
for i in range(1, len(data)):
grade, rng = data[i][0], data[i][1]
for j in range(2, len(data[i])):
master[spp][grade][rng][j - 2] += (data[i][j] / self.plots)
master[spp][grade]['TGRD'][j - 2] += (data[i][j] / self.plots)
master[spp]['TTL'][rng][j - 2] += (data[i][j] / self.plots)
master[spp]['TTL']['TGRD'][j - 2] += (data[i][j] / self.plots)
master['TOTALS'][grade][rng][j - 2] += (data[i][j] / self.plots)
master['TOTALS'][grade]['TGRD'][j - 2] += (data[i][j] / self.plots)
master['TOTALS']['TTL'][rng][j - 2] += (data[i][j] / self.plots)
master['TOTALS']['TTL']['TGRD'][j - 2] += (data[i][j] / self.plots)
# Removing any Grades that have zero data
ax_list = []
for key in master:
for grade in master[key]:
count = 0
for rng in master[key][grade]:
count += master[key][grade][rng][0]
if count == 0:
ax_list.append((key, grade))
for ax in ax_list:
del master[ax[0]][ax[1]]
return master
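if __name__ == "__main__":
    # Illustrative invocation (added example): the CSV path, stand name and cruise settings
    # below are placeholders, not values from the original project.
    example_report = Report("stand_data.csv", "EX1", Plots=10, Pref_Log=32, Min_Log=16)
    print(example_report.conditions_dict.get("TOTALS"))
    print(example_report.logs_dict.get("TOTALS"))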
| 2.984375 | 3 |
cnlp/data/data_loaders/data_loader.py | pfchai/CNLP | 0 | 12795784 | # -*- coding: utf-8 -*-
from cnlp.common.registrable import Registrable
class DataLoader(Registrable):
default_implementation = 'simple'
def __len__(self):
raise TypeError
def __iter__(self):
raise NotImplementedError
def iter_instances(self):
raise NotImplementedError
def index_with(self, vocab):
raise NotImplementedError
def set_target_device(self, device):
raise NotImplementedError
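# A minimal illustrative subclass (added example; a real loader would normally also register
# itself with the Registrable machinery, e.g. via a DataLoader.register decorator if the
# cnlp Registrable base provides one - that detail is an assumption here).
class ListDataLoader(DataLoader):
    """Toy loader that serves already-built instances from an in-memory list."""

    def __init__(self, instances):
        self.instances = instances

    def __len__(self):
        return len(self.instances)

    def __iter__(self):
        return iter(self.instances)

    def iter_instances(self):
        yield from self.instances

    def index_with(self, vocab):
        pass  # nothing to index in this toy loader

    def set_target_device(self, device):
        pass  # device placement is a no-op for plain Python lists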
| 2.15625 | 2 |
bin/geomag_webservice.py | alejandrodelcampillo/geomag-algorithms | 1 | 12795785 | <gh_stars>1-10
#! /usr/bin/env python
from __future__ import absolute_import, print_function
import os
import sys
from wsgiref.simple_server import make_server
# ensure geomag is on the path before importing
try:
import geomagio # noqa (tells linter to ignore this line.)
except ImportError:
path = os.path
script_dir = path.dirname(path.abspath(__file__))
sys.path.append(path.normpath(path.join(script_dir, "..")))
import geomagio
if __name__ == "__main__":
# read configuration from environment
edge_host = os.getenv("EDGE_HOST", "cwbpub.cr.usgs.gov")
edge_port = int(os.getenv("EDGE_PORT", "2060"))
factory_type = os.getenv("GEOMAG_FACTORY_TYPE", "edge")
webservice_host = os.getenv("GEOMAG_WEBSERVICE_HOST", "")
webservice_port = int(os.getenv("GEOMAG_WEBSERVICE_PORT", "7981"))
version = os.getenv("GEOMAG_VERSION", None)
# configure factory
if factory_type == "edge":
factory = geomagio.edge.EdgeFactory(host=edge_host, port=edge_port)
else:
raise "Unknown factory type '%s'" % factory_type
print("Starting webservice on %s:%d" % (webservice_host, webservice_port))
app = geomagio.WebService(factory, version)
httpd = make_server(webservice_host, webservice_port, app)
httpd.serve_forever()
| 2.09375 | 2 |
arko/parser.py | bfontaine/arkopy | 0 | 12795786 | # -*- coding: UTF-8 -*-
import ply.yacc
from collections import OrderedDict
from .lexer import tokens, lex
# a:4:{s:4:"date";s:10:"2019-12-29";s:10:"type_fonds";s:11:"arko_seriel";s:4:"ref1";i:12;s:4:"ref2";i:4669;}
from .models import Object
start = 'expression'
def p_expression(p):
"""expression : atom
| associative"""
p[0] = p[1]
def p_atom(p):
"""atom : integer
| float
| boolean
| string
| null"""
p[0] = p[1]
def p_collection(p):
"""associative : array
| object"""
p[0] = p[1]
def p_integer(p):
"""integer : I_SYMBOL COLON INTEGER"""
p[0] = int(p[3])
def p_float(p):
"""float : D_SYMBOL COLON FLOAT"""
p[0] = float(p[3])
def p_boolean(p):
"""boolean : B_SYMBOL COLON INTEGER"""
p[0] = p[3] != "0"
def p_string(p):
"""string : S_SYMBOL COLON INTEGER COLON STRING"""
p[0] = p[5]
def p_null(p):
"""null : N_SYMBOL"""
p[0] = None
def p_array(p):
"""array : A_SYMBOL raw_array"""
p[0] = p[2]
def p_raw_array(p):
"""raw_array : COLON INTEGER COLON LEFT_BRACKET array_expressions RIGHT_BRACKET"""
d = OrderedDict()
expressions = p[5]
for i, k in enumerate(expressions[::2]):
d[k] = expressions[i * 2 + 1]
p[0] = d
def p_array_expressions_array_expression(p):
"""array_expressions : expression SEMICOLON"""
p[0] = [p[1]]
def p_array_expressions_array_expression_array_expressions(p):
"""array_expressions : expression SEMICOLON array_expressions"""
p[0] = [p[1]] + p[3]
def p_object(p):
"""object : O_SYMBOL COLON INTEGER COLON STRING raw_array"""
p[0] = Object(p[5], dict(p[6]))
def eof():
raise RuntimeError('EOF Reached')
def p_error(p):
if p is None:
eof()
else:
raise RuntimeError(str(p))
def parse(text):
parser = ply.yacc.yacc()
expression = parser.parse(text, lexer=lex())
return expression
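# Illustrative use of parse() (added example; the exact string handling depends on the lexer
# defined in .lexer, so treat these expected values as a sketch rather than a guarantee):
# parse('i:12') should evaluate to the integer 12, and
# parse('a:2:{s:4:"ref1";i:12;s:4:"ref2";i:4669;}') should yield an ordered mapping of
# "ref1" -> 12 and "ref2" -> 4669, matching the sample payload quoted at the top of this file.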
| 2.546875 | 3 |
wmgraph/api/group.py | patrickatamaniuk/wmgraph | 0 | 12795787 | from .cache import memoized
class MgraphConnectorGroupMixin:
def list_groups(self, **kwargs):
'''https://docs.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0'''
url = f'/groups'
search = kwargs.get('search')
if search is not None:
del kwargs['search']
url += f"?$filter=startswith(displayName,'{search}')"
return self.get_paged(url, **kwargs)
@memoized
def get_group(self, group_id):
'''returns a group'''
return self.get(f'/groups/{group_id}')
def list_group_members(self, group_id):
'''returns directoryObjects'''
return self.get_paged(f'/groups/{group_id}/members')
def get_directoryobject(self, object_id):
return self.get(f'/directoryObjects/{object_id}')
def list_group_owners(self, group_id):
return self.get_paged(f'/groups/{group_id}/owners')
def get_group_drive(self, group_id):
return self.get(f'/groups/{group_id}/drive')
def get_group_drives(self, group_id):
return self.get(f'/groups/{group_id}/drives')
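# Illustrative usage sketch. Assumptions: a concrete connector class providing
# self.get / self.get_paged mixes this class in; the class name, constructor
# arguments and field names below are hypothetical, not part of this package.
#     connector = MgraphConnector(credentials)
#     for group in connector.list_groups(search="Finance"):
#         print(group["id"], group["displayName"])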
| 2.46875 | 2 |
scripts/quest/q22000e.py | G00dBye/YYMS | 54 | 12795788 | <reponame>G00dBye/YYMS<filename>scripts/quest/q22000e.py
# 22000 | Strange dream (Evan intro)
sm.setSpeakerID(1013101)
sm.sendNext("Hey, Evan. You up? What's with the dark circles under your eyes? Didn't sleep well? Huh? A strange dream? What was it about? Whoa? A dream about a dragon?")
sm.sendSay("Muahahahahaha, a dragon? Are you serious? I don't know how to interpret dreams, but that sounds like a good one! Did you see a dog in your dream, too? Hahaha!\r\n\r\n#fUI/UIWindow2.img/QuestIcon/8/0# 20 exp")
sm.giveExp(20)
sm.completeQuest(parentID)
sm.sendSayImage("UI/tutorial/evan/2/0")
| 1.609375 | 2 |
aaTwitter.py | OpenLinkedSocialData/aa01 | 0 | 12795789 | #! /usr/bin/env python
#-*- coding: utf8 -*-
# put this on /usr/local/bin/
# without .py extension
from twython import TwythonStreamer
import datetime, pymongo
class MyStreamer(TwythonStreamer):
def on_success(self, data):
if 'text' in data:
now=datetime.datetime.now()
nick=data['user']["screen_name"].encode('utf-8')
shout=data['text'].encode('utf-8')
shout_transaction={"time":now,"nick":nick,"shout":shout}
try:
client.aaserver.shouts.insert(shout_transaction)
except:
client=pymongo.MongoClient("mongodb://labmacambira:[email protected]:31948/aaserver")
client.aaserver.shouts.insert(shout_transaction)
print shout_transaction
def on_error(self, status_code, data):
print status_code
print "iniciando streaming"
class tw4:
tak= "U3gkdcw144pb3H315Vsmphne5"
taks="jbuLKuamEaiNPXJfhfC9kaXYcoSSfRIgTldwuQYCcUJzEGNukU"
tat= "2430470406-45gX6ihMxnKQQmjX2yR1VoaTQIddgY5bT7OSOzT"
tats="bHS4NkMwBFaysdVqnsT25xhNzZwEbM64KPdpRDB6RqZ2Z"
stream=MyStreamer(tw4.tak,tw4.taks,tw4.tat,tw4.tats)
stream.statuses.filter(track="#aao0")
| 2.34375 | 2 |
faker/providers/miscelleneous.py | kaflesudip/faker | 1 | 12795790 | # coding=utf-8
# module provided just for backward compatibility
from .misc import *
| 1.226563 | 1 |
tilosutils/package_info.py | dertilo/util | 0 | 12795791 | <gh_stars>0
# heavily inspired by https://github.com/NVIDIA/NeMo
MAJOR = 0
MINOR = 1
PATCH = 0
PRE_RELEASE = ''
# Use the following formatting: (major, minor, patch, pre-release)
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
__shortversion__ = '.'.join(map(str, VERSION[:3]))
__version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:])
__package_name__ = 'tilosutils'
__contact_names__ = 'dertilo'
__contact_emails__ = '<EMAIL>'
__homepage__ = 'https://github.com/dertilo'
__repository_url__ = 'https://github.com/dertilo/tilosutils'
__download_url__ = 'https://github.com/dertilo/tilosutils'
__description__ = 'python code'
__license__ = 'MIT License'
__keywords__ = 'machine learning, NLP, pytorch, tts, speech, language'
| 1.679688 | 2 |
setup.py | Narcissist1/wechat-pay | 9 | 12795792 | <gh_stars>1-10
from setuptools import setup
setup(
name='wechat-pay-sdk',
packages=['wechatpay'],
version='0.6.2',
description='A sdk for wechat pay',
author='<NAME>',
license='MIT',
include_package_data=True,
author_email='<EMAIL>',
url='https://github.com/Narcissist1/wechat-pay',
download_url='https://github.com/Narcissist1/wechat-pay/archive/0.1.tar.gz',
keywords=['wechat', 'pay'],
classifiers=[],
install_requires=[
'xmltodict',
'requests',
'dicttoxml',
]
)
| 1.359375 | 1 |
plugin.video.vstream/resources/sites/streamingk_com.py | akuala/REPO.KUALA | 2 | 12795793 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Vstream https://github.com/Kodi-vStream/venom-xbmc-addons
from resources.lib.gui.hoster import cHosterGui
from resources.lib.gui.gui import cGui
from resources.lib.handler.inputParameterHandler import cInputParameterHandler
from resources.lib.handler.outputParameterHandler import cOutputParameterHandler
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.parser import cParser
from resources.lib.util import cUtil
from resources.lib.comaddon import progress #, VSlog
from resources.lib.multihost import cJheberg
import re, unicodedata
# clone of dpstreaming.tv
SITE_IDENTIFIER = 'streamingk_com'
SITE_NAME = 'StreamingK'
SITE_DESC = 'Films, Séries & Mangas en streaming. Tout les meilleurs streaming en illimité.'
URL_MAIN = 'https://streamingk.net/'
MOVIE_NEWS = (URL_MAIN + 'category/films/', 'showMovies')
MOVIE_MOVIE = (URL_MAIN + 'category/films/', 'showMovies')
MOVIE_VOSTFR = (URL_MAIN + 'category/films/vostfr-films/', 'showMovies')
MOVIE_GENRES = (True, 'showGenres')
SERIE_SERIES = (URL_MAIN + 'category/series-tv/', 'showMovies')
SERIE_NEWS = (URL_MAIN + 'category/series-tv/', 'showMovies')
SERIE_LIST = (True, 'showList')
SERIE_VFS = (URL_MAIN + 'category/series-tv/series-streaming-vf/', 'showMovies')
SERIE_VOSTFR = (URL_MAIN + 'category/series-tv/series-streaming-vostfr/', 'showMovies')
REPLAYTV_NEWS = (URL_MAIN + 'category/emissions-tv/', 'showMovies')
REPLAYTV_REPLAYTV = (URL_MAIN + 'category/emissions-tv/', 'showMovies')
URL_SEARCH = (URL_MAIN + '?s=', 'showMovies')
URL_SEARCH_MOVIES = (URL_MAIN + '?s=', 'showMovies')
URL_SEARCH_SERIES = (URL_MAIN + '?s=', 'showMovies')
FUNCTION_SEARCH = 'showMovies'
def load():
oGui = cGui()
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', 'http://venom/')
oGui.addDir(SITE_IDENTIFIER, 'showMoviesSearch', 'Recherche', 'search.png', oOutputParameterHandler)
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_NEWS[1], 'Films (Derniers ajouts)', 'news.png', oOutputParameterHandler)
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1], 'Films (Genres)', 'genres.png', oOutputParameterHandler)
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', SERIE_NEWS[0])
oGui.addDir(SITE_IDENTIFIER, SERIE_NEWS[1], 'Séries (Derniers ajouts)', 'news.png', oOutputParameterHandler)
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', SERIE_LIST[0])
oGui.addDir(SITE_IDENTIFIER, SERIE_LIST[1], 'Séries (Liste)', 'listes.png', oOutputParameterHandler)
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', SERIE_VFS[0])
oGui.addDir(SITE_IDENTIFIER, SERIE_VFS[1], 'Séries (VF)', 'vf.png', oOutputParameterHandler)
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', SERIE_VOSTFR[0])
oGui.addDir(SITE_IDENTIFIER, SERIE_VOSTFR[1], 'Séries (VOSTFR)', 'vostfr.png', oOutputParameterHandler)
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', REPLAYTV_NEWS[0])
oGui.addDir(SITE_IDENTIFIER, REPLAYTV_NEWS[1], 'Emissions TV', 'replay.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showMoviesSearch():
oGui = cGui()
sSearchText = oGui.showKeyBoard()
if (sSearchText != False):
sUrl = URL_SEARCH[0] + sSearchText
showMovies(sUrl)
oGui.setEndOfDirectory()
return
def showGenres():
oGui = cGui()
liste = []
liste.append( ['Action', URL_MAIN + 'category/films/action/'] )
liste.append( ['Animation', URL_MAIN + 'category/films/animation/'] )
liste.append( ['Arts Martiaux', URL_MAIN + 'category/films/arts-martiaux/'] )
liste.append( ['Aventure', URL_MAIN + 'category/films/aventure-films/'] )
liste.append( ['Biopic', URL_MAIN + 'category/films/biopic/'] )
liste.append( ['Comédie', URL_MAIN + 'category/films/comedie/'] )
liste.append( ['Comédie Dramatique', URL_MAIN + 'category/films/comedie-dramatique/'] )
liste.append( ['Documentaire', URL_MAIN + 'category/documentaire/'] )
liste.append( ['Drame', URL_MAIN + 'category/films/drame/'] )
liste.append( ['Espionnage', URL_MAIN + 'category/films/espionnage/'] )
liste.append( ['Famille', URL_MAIN + 'category/films/famille/'] )
liste.append( ['Fantastique', URL_MAIN + 'category/films/fantastique/'] )
liste.append( ['Guerre', URL_MAIN + 'category/films/guerre/'] )
liste.append( ['Historique', URL_MAIN + 'category/films/historique/'] )
liste.append( ['Horreur', URL_MAIN + 'category/films/horreur/'] )
liste.append( ['Musical', URL_MAIN + 'category/films/musical/'] )
liste.append( ['Policier', URL_MAIN + 'category/films/policier/'] )
liste.append( ['Romance', URL_MAIN + 'category/films/romance/'] )
liste.append( ['Science-Fiction', URL_MAIN + 'category/films/science-fiction/'] )
liste.append( ['Spectacle', URL_MAIN + 'category/films/spectacle/'] )
liste.append( ['Thriller', URL_MAIN + 'category/films/thriller/'] )
liste.append( ['Western', URL_MAIN + 'category/films/western/'] )
liste.append( ['VOSTFR', URL_MAIN + 'category/films/vostfr-films/'] )
liste.append( ['BLURAY 1080p/720p', URL_MAIN + 'category/films/bluray-1080p-720p/'] )
liste.append( ['BLURAY 3D', URL_MAIN + 'category/films/bluray-3d/'] )
liste.append( ['Emissions TV', URL_MAIN + 'category/emissions-tv/'] )
for sTitle, sUrl in liste:
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', sUrl)
oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'genres.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showList():
oGui = cGui()
liste = []
liste.append( ['0-9', URL_MAIN + 'category/series-tv/0-9/'] )
liste.append( ['A-B-C', URL_MAIN + 'category/series-tv/a-b-c/'] )
liste.append( ['D-E-F', URL_MAIN + 'category/series-tv/d-e-f/'] )
liste.append( ['G-H-I', URL_MAIN + 'category/series-tv/g-h-i/'] )
liste.append( ['J-K-L', URL_MAIN + 'category/series-tv/j-k-l/'] )
liste.append( ['M-N-O', URL_MAIN + 'category/series-tv/m-n-o/'] )
liste.append( ['P-Q-R', URL_MAIN + 'category/series-tv/p-q-r/'] )
liste.append( ['S-T-U', URL_MAIN + 'category/series-tv/s-t-u/'] )
liste.append( ['V-W-X-Y-Z', URL_MAIN + 'category/series-tv/v-w-x-y-z/'] )
for sTitle, sUrl in liste:
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', sUrl)
oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Lettres [COLOR coral]' + sTitle + '[/COLOR]', 'az.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showMovies(sSearch = ''):
oGui = cGui()
if sSearch:
sUrl = sSearch.replace(' ', '+')
sPattern = '<div class="post-thumbnail".+?<a href="([^"]+)".+?(?:src="([^"]+(?:png|jpeg|jpg)|)").+?alt="([^"]+)"'
else:
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
sPattern = '<div class="post-thumbnail".+?<a href="([^"]+)".+?(?:src="([^"]+(?:png|jpeg|jpg)|)").+?alt="([^"]+)".+?<p>([^<]+)</p>'
oRequestHandler = cRequestHandler(sUrl)
sHtmlContent = oRequestHandler.request()
    # Hack to drop the 3 extra rows that show up in search results
sHtmlContent = sHtmlContent.replace('quelle-est-votre-serie-preferee', '<>')
sHtmlContent = sHtmlContent.replace('top-series-du-moment', '<>')
sHtmlContent = sHtmlContent.replace('listes-des-series-annulees-et-renouvelees', '<>')
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == False):
oGui.addText(SITE_IDENTIFIER)
if (aResult[0] == True):
total = len(aResult[1])
progress_ = progress().VScreate(SITE_NAME)
for aEntry in aResult[1]:
progress_.VSupdate(progress_, total)
if progress_.iscanceled():
break
            # If searching and there are too many results, filter them down
if sSearch and total > 2:
if cUtil().CheckOccurence(sSearch.replace(URL_SEARCH[0], ''), aEntry[2]) == 0:
continue
sUrl1 = aEntry[0]
sTitle = aEntry[2].replace('Saiosn', 'Saison')
if 'Brouillon' in sTitle:
sTitle = sUrl1.rsplit('/', 2)[1]
sTitle = sTitle.replace('-streaming-telecharger', '').replace('-', ' ')
sTitle = sTitle.replace(' [Streaming]', '')
sTitle = sTitle.replace(' [Telecharger]', '').replace(' [Telechargement]', '')
sDisplayTitle = sTitle
            # strip the quality tag
sTitle = re.sub('\[\w+]', '', sTitle)
sTitle = re.sub('\[\w+ \w+]', '', sTitle)
sThumb = aEntry[1]
if sSearch:
sDesc = ''
else:
sDesc = aEntry[3].replace('[…]', '').replace('…', '...').replace('’', '\'').replace('’', '\'').replace('…', '...')
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', sUrl1)
oOutputParameterHandler.addParameter('sMovieTitle', sTitle)
oOutputParameterHandler.addParameter('sThumb', sThumb)
if '-filmographie-streaming' in aEntry[1]:
pass
elif 'quelle-est-votre-serie-preferee' in aEntry[1]:
pass
elif 'series' in sUrl1 or re.match('.+?saison [0-9]+', sTitle, re.IGNORECASE):
oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler)
elif 'mangas' in sUrl:
oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler)
else:
oGui.addMovie(SITE_IDENTIFIER, 'showHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler)
progress_.VSclose(progress_)
if not sSearch:
sNextPage = __checkForNextPage(sHtmlContent)
if (sNextPage != False):
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', sNextPage)
oGui.addNext(SITE_IDENTIFIER, 'showMovies', '[COLOR teal]Suivant >>>[/COLOR]', oOutputParameterHandler)
oGui.setEndOfDirectory()
def __checkForNextPage(sHtmlContent):
sPattern = '<a class="next page-numbers" href="([^"]+)"'
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
return aResult[1][0]
return False
def showSeries(sLoop = False):
oGui = cGui()
oParser = cParser()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')
sThumb = oInputParameterHandler.getValue('sThumb')
oRequestHandler = cRequestHandler(sUrl)
sHtmlContent = oRequestHandler.request()
sHtmlContent = sHtmlContent.decode('utf-8', "replace")
    sHtmlContent = unicodedata.normalize('NFD', sHtmlContent).encode('ascii', 'ignore').decode('unicode_escape') # strip accents and '\'
    sHtmlContent = sHtmlContent.encode('utf-8') # back to utf-8
    # Rewrite sHtmlContent to pick up the links and grab the latest episode
sHtmlContent = sHtmlContent.replace('<span style="color: #ff9900;">New</span><b> </b>', '')
sHtmlContent = sHtmlContent.replace('<b> </b>', ' ')
sHtmlContent = sHtmlContent.replace('<b></b>', ' ')
sHtmlContent = sHtmlContent.replace('<span class="su-lightbox" data-mfp-src', '<a href')
sHtmlContent = sHtmlContent.replace('https://cut-urls.com/st?api=d6e46f2fcd4bfed906a9f3ecbbb6830e862b3afb&url=', '')
    # retrieve the synopsis
sDesc = ''
try:
sPattern = '</p><p style="text-align: center;">([^<]+)</p><p style="text-align: center;">'
aResult = oParser.parse(sHtmlContent, sPattern)
if aResult[0]:
sDesc = aResult[1][0]
sDesc = sDesc.replace('’', '\'').replace('…', '...')
except:
pass
sPattern = '<span style="color: #33cccc;[^<>"]*">(?:<(?:strong|b)>)((?:Stream|Telec)[^<>]+)|"center">(.pisode[^<]{2,12})*<(?!\/a>)([^<>]*a href="http.+?)(?:<.p>|<br|<.div)'
aResult = oParser.parse(sHtmlContent, sPattern)
    # workaround for the single-episode case
# if (aResult[0] == False) and (sLoop == False):
# #oGui.setEndOfDirectory()
# serieHosters(True)
# return
if (aResult[0] == True):
total = len(aResult[1])
progress_ = progress().VScreate(SITE_NAME)
for aEntry in aResult[1]:
progress_.VSupdate(progress_, total)
if progress_.iscanceled():
break
            if aEntry[0]: # stream or download header
oGui.addText(SITE_IDENTIFIER, '[COLOR red]' + aEntry[0] + '[/COLOR]')
            else: # seasons and episodes
sUrl = aEntry[2]
SXXEX = re.search('>(S[0-9]{2}E[0-9]{2})<', sUrl)
HOST = re.search('a href="https*:\/\/([^.]+)', sUrl)
if SXXEX:
                    # avoid showing the season twice in the title
sTitle = re.sub(' - Saison \d+', '', sMovieTitle) + ' ' + SXXEX.group(1)
if HOST:
HOST = HOST.group(1).split('/')[0]
sDisplayTitle = sTitle + ' [COLOR coral]' + HOST.capitalize() + '[/COLOR]'
else:
sTitle = sMovieTitle + ' ' + aEntry[1].replace(' New', '')
sDisplayTitle = sTitle
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', sUrl)
oOutputParameterHandler.addParameter('sMovieTitle', sTitle)
oOutputParameterHandler.addParameter('sThumb', sThumb)
oGui.addMisc(SITE_IDENTIFIER, 'serieHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler)
progress_.VSclose(progress_)
oGui.setEndOfDirectory()
def showHosters(sLoop = False):
oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')
sThumb = oInputParameterHandler.getValue('sThumb')
oRequestHandler = cRequestHandler(sUrl)
sHtmlContent = oRequestHandler.request()
    # Rewrite sHtmlContent to recover the quality tag
sHtmlContent = sHtmlContent.replace('<span style="color: #ff9900;"><strong>', '<strong><span style="color: #ff9900;">')
oParser = cParser()
sPattern = '<strong><span style="color: #ff9900;">([^<]+)<|<a class="large button.+?" href="([^<>"]+?)" target="(?:_blank|vid)"'
aResult = oParser.parse(sHtmlContent, sPattern)
    # If there is nothing to display, it may be a series
if (len(aResult) == 0) and (sLoop == False):
# oGui.setEndOfDirectory()
showSeries(True)
return
if (aResult[0] == True):
for aEntry in aResult[1]:
if aEntry[0]:
oGui.addText(SITE_IDENTIFIER, '[COLOR red]' + aEntry[0] + '[/COLOR]')
else:
sHosterUrl = aEntry[1]
                # to collect all the links
if '&url=' in sHosterUrl:
sHosterUrl = sHosterUrl.split('&url=')[1]
                # to get the jwplayer (GoogleDrive) link
if 'filmhdstream' in sHosterUrl:
oRequestHandler = cRequestHandler(sHosterUrl)
sHtmlContent = oRequestHandler.request()
sPattern = '<iframe.+?src="([^"]+)"'
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
for aEntry in aResult[1]:
sHosterUrl = aEntry
oHoster = cHosterGui().checkHoster(sHosterUrl)
if (oHoster != False):
oHoster.setDisplayName(sMovieTitle)
oHoster.setFileName(sMovieTitle)
cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb)
                # to get the jheberg links
elif 'jheberg' in sHosterUrl:
aResult = cJheberg().GetUrls(sHosterUrl)
if aResult:
for aEntry in aResult:
sHosterUrl = aEntry
oHoster = cHosterGui().checkHoster(sHosterUrl)
if (oHoster != False):
oHoster.setDisplayName(sMovieTitle)
oHoster.setFileName(sMovieTitle)
cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb)
else:
oHoster = cHosterGui().checkHoster(sHosterUrl)
if (oHoster != False):
oHoster.setDisplayName(sMovieTitle)
oHoster.setFileName(sMovieTitle)
cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb)
oGui.setEndOfDirectory()
def serieHosters():
oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')
sThumb = oInputParameterHandler.getValue('sThumb')
sPattern = 'href="([^"]+)"'
oParser = cParser()
aResult = oParser.parse(sUrl, sPattern)
if (aResult[0] == True):
for aEntry in aResult[1]:
sHosterUrl = aEntry
            # to collect all the links
if '&url=' in sHosterUrl:
sHosterUrl = sHosterUrl.split('&url=')[1]
            # to get the jwplayer (GoogleDrive) link
if 'filmhdstream' in sHosterUrl:
oRequestHandler = cRequestHandler(sHosterUrl)
sHtmlContent = oRequestHandler.request()
sPattern = '<iframe.+?src="([^"]+)"'
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
for aEntry in aResult[1]:
sHosterUrl = aEntry
oHoster = cHosterGui().checkHoster(sHosterUrl)
if (oHoster != False):
oHoster.setDisplayName(sMovieTitle)
oHoster.setFileName(sMovieTitle)
cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb)
            # to get the jheberg links
elif 'jheberg' in sHosterUrl:
aResult = cJheberg().GetUrls(sHosterUrl)
if aResult:
for aEntry in aResult:
sHosterUrl = aEntry
oHoster = cHosterGui().checkHoster(sHosterUrl)
if (oHoster != False):
oHoster.setDisplayName(sMovieTitle)
oHoster.setFileName(sMovieTitle)
cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb)
else:
oHoster = cHosterGui().checkHoster(sHosterUrl)
if (oHoster != False):
oHoster.setDisplayName(sMovieTitle)
oHoster.setFileName(sMovieTitle)
cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb)
oGui.setEndOfDirectory()
| 1.65625 | 2 |
modules/skin_detection.py | Merkll/llcolorizer | 2 | 12795794 | <filename>modules/skin_detection.py
import cv2
import numpy as np
import matplotlib.pyplot as plt
min_HSV = np.array([0, 58, 30], dtype = "uint8")
max_HSV = np.array([33, 255, 255], dtype = "uint8")
def get_skin_region(image):
image_BGR = cv2.imread(image)
image_HSV = cv2.cvtColor(image_BGR, cv2.COLOR_BGR2HSV)
return cv2.inRange(image_HSV, min_HSV, max_HSV), image_BGR
def get_skin(image):
skin_region, image_BGR = get_skin_region(image)
image_HSV = cv2.bitwise_and(image_BGR, image_BGR, mask = skin_region)
return image_HSV, np.hstack([image_BGR, image_HSV]) | 3.109375 | 3 |
api/edge_api/identities/views.py | SolidStateGroup/Bullet-Train-API | 126 | 12795795 | import base64
import json
import typing
import marshmallow
from boto3.dynamodb.conditions import Key
from drf_yasg2.utils import swagger_auto_schema
from flag_engine.api.schemas import APITraitSchema
from flag_engine.identities.builders import (
build_identity_dict,
build_identity_model,
)
from rest_framework import status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import NotFound, ValidationError
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from app.pagination import EdgeIdentityPagination
from edge_api.identities.serializers import (
EdgeIdentityFeatureStateSerializer,
EdgeIdentityFsQueryparamSerializer,
EdgeIdentitySerializer,
EdgeIdentityTraitsSerializer,
)
from environments.identities.models import Identity
from environments.models import Environment
from environments.permissions.constants import MANAGE_IDENTITIES
from environments.permissions.permissions import NestedEnvironmentPermissions
from features.permissions import IdentityFeatureStatePermissions
from projects.exceptions import DynamoNotEnabledError
from .exceptions import TraitPersistenceError
trait_schema = APITraitSchema()
class EdgeIdentityViewSet(viewsets.ModelViewSet):
serializer_class = EdgeIdentitySerializer
pagination_class = EdgeIdentityPagination
lookup_field = "identity_uuid"
dynamo_identifier_search_functions = {
"EQUAL": lambda identifier: Key("identifier").eq(identifier),
"BEGINS_WITH": lambda identifier: Key("identifier").begins_with(identifier),
}
def initial(self, request, *args, **kwargs):
environment = self.get_environment_from_request()
if not environment.project.enable_dynamo_db:
raise DynamoNotEnabledError()
super().initial(request, *args, **kwargs)
def _get_search_function_and_value(
self,
search_query: str,
) -> typing.Tuple[typing.Callable, str]:
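        # A quoted query such as '"alice"' yields an exact-match condition on
        # 'alice'; an unquoted query such as 'ali' yields begins_with('ali').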
if search_query.startswith('"') and search_query.endswith('"'):
return self.dynamo_identifier_search_functions[
"EQUAL"
], search_query.replace('"', "")
return self.dynamo_identifier_search_functions["BEGINS_WITH"], search_query
def get_object(self):
return Identity.dynamo_wrapper.get_item_from_uuid_or_404(
self.kwargs["identity_uuid"]
)
def get_queryset(self):
page_size = self.pagination_class().get_page_size(self.request)
previous_last_evaluated_key = self.request.GET.get("last_evaluated_key")
search_query = self.request.query_params.get("q")
start_key = None
if previous_last_evaluated_key:
start_key = json.loads(base64.b64decode(previous_last_evaluated_key))
if not search_query:
return Identity.dynamo_wrapper.get_all_items(
self.kwargs["environment_api_key"], page_size, start_key
)
search_func, search_identifier = self._get_search_function_and_value(
search_query
)
identity_documents = Identity.dynamo_wrapper.search_items_with_identifier(
self.kwargs["environment_api_key"],
search_identifier,
search_func,
page_size,
start_key,
)
return identity_documents
def get_permissions(self):
return [
IsAuthenticated(),
NestedEnvironmentPermissions(
action_permission_map={
"retrieve": MANAGE_IDENTITIES,
"get_traits": MANAGE_IDENTITIES,
"update_traits": MANAGE_IDENTITIES,
}
),
]
def get_environment_from_request(self):
"""
Get environment object from URL parameters in request.
"""
return Environment.objects.get(api_key=self.kwargs["environment_api_key"])
def perform_destroy(self, instance):
Identity.dynamo_wrapper.delete_item(instance["composite_key"])
@swagger_auto_schema(
responses={200: EdgeIdentityTraitsSerializer(many=True)},
)
@action(detail=True, methods=["get"], url_path="list-traits")
def get_traits(self, request, *args, **kwargs):
identity = self.get_object()
data = trait_schema.dump(identity["identity_traits"], many=True)
return Response(data=data, status=status.HTTP_200_OK)
@swagger_auto_schema(
method="put",
request_body=EdgeIdentityTraitsSerializer,
responses={200: EdgeIdentityTraitsSerializer()},
)
@action(detail=True, methods=["put"], url_path="update-traits")
def update_traits(self, request, *args, **kwargs):
environment = self.get_environment_from_request()
if not environment.project.organisation.persist_trait_data:
raise TraitPersistenceError()
identity = build_identity_model(self.get_object())
try:
trait = trait_schema.load(request.data)
except marshmallow.ValidationError as validation_error:
raise ValidationError(validation_error) from validation_error
identity.update_traits([trait])
Identity.dynamo_wrapper.put_item(build_identity_dict(identity))
data = trait_schema.dump(trait)
return Response(data, status=status.HTTP_200_OK)
class EdgeIdentityFeatureStateViewSet(viewsets.ModelViewSet):
permission_classes = [IsAuthenticated, IdentityFeatureStatePermissions]
lookup_field = "featurestate_uuid"
serializer_class = EdgeIdentityFeatureStateSerializer
# Patch is not supported
http_method_names = [
"get",
"post",
"put",
"delete",
"head",
"options",
"trace",
]
pagination_class = None
def initial(self, request, *args, **kwargs):
super().initial(request, *args, **kwargs)
identity_document = Identity.dynamo_wrapper.get_item_from_uuid_or_404(
self.kwargs["edge_identity_identity_uuid"]
)
self.identity = build_identity_model(identity_document)
def get_object(self):
featurestate_uuid = self.kwargs["featurestate_uuid"]
try:
featurestate = next(
filter(
lambda fs: fs.featurestate_uuid == featurestate_uuid,
self.identity.identity_features,
)
)
except StopIteration:
raise NotFound()
return featurestate
@swagger_auto_schema(query_serializer=EdgeIdentityFsQueryparamSerializer())
def list(self, request, *args, **kwargs):
q_params_serializer = EdgeIdentityFsQueryparamSerializer(
data=self.request.query_params
)
q_params_serializer.is_valid(raise_exception=True)
identity_features = self.identity.identity_features
feature = q_params_serializer.data.get("feature")
if feature:
identity_features = filter(
lambda fs: fs.feature.id == feature, identity_features
)
serializer = self.get_serializer(identity_features, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def perform_destroy(self, instance):
self.identity.identity_features.remove(instance)
Identity.dynamo_wrapper.put_item(build_identity_dict(self.identity))
| 1.796875 | 2 |
uninas/training/metrics/abstract.py | cogsys-tuebingen/uninas | 18 | 12795796 | import torch
from collections import defaultdict
from uninas.data.abstract import AbstractDataSet
from uninas.models.networks.abstract import AbstractNetwork
from uninas.training.result import ResultValue
from uninas.utils.args import ArgsInterface, Namespace, Argument
class AbstractMetric(ArgsInterface):
"""
Metrics during (supervised) network training,
between network outputs and some targets
"""
def __init__(self, head_weights: list, **kwargs):
super().__init__()
self.head_weights = head_weights
for k, v in kwargs.items():
self.__setattr__(k, v)
def get_log_name(self) -> str:
raise NotImplementedError
@classmethod
def from_args(cls, args: Namespace, index: int, data_set: AbstractDataSet, head_weights: list) -> 'AbstractMetric':
"""
:param args: global arguments namespace
:param index: index of this metric
:param data_set: data set that is evaluated on
:param head_weights: how each head is weighted
"""
all_parsed = cls._all_parsed_arguments(args, index=index)
return cls(head_weights=head_weights, **all_parsed)
@classmethod
def _to_dict(cls, key: str, prefix: str, name: str, dct: dict) -> dict:
""" adds key and name to all dict entries """
s = "%s/%s" % (key, name) if len(prefix) == 0 else "%s/%s/%s" % (prefix, key, name)
return {'%s/%s' % (s, k): v for k, v in dct.items()}
@classmethod
def _batchify_tensors(cls, logits: [torch.Tensor], targets: torch.Tensor) -> ([torch.Tensor], torch.Tensor):
"""
reshape all [batch, classes, n0, n1, ...] tensors into [batch, classes]
:param logits: network outputs
:param targets: output targets
"""
new_logits = []
for tensor in logits + [targets]:
shape = tensor.shape
if len(shape) > 2:
new_logits.append(tensor.transpose(0, 1).reshape(shape[1], -1).transpose(0, 1))
else:
new_logits.append(tensor)
return new_logits[:-1], new_logits[-1]
@classmethod
def _remove_onehot(cls, targets: torch.Tensor) -> torch.Tensor:
""" remove one-hot encoding from a [batch, classes] tensor """
if len(targets.shape) == 2:
return torch.argmax(targets, dim=-1)
return targets
@classmethod
def _ignore_with_index(cls, logits: [torch.Tensor], targets: torch.Tensor,
ignore_target_index=-999, ignore_prediction_index=-999) ->\
([torch.Tensor], torch.Tensor):
"""
remove all occurrences where the target equals the ignore index, prevent logits from predicting an ignored class
:param logits: network outputs, each has the [batch, classes] shape
:param targets: output targets, has the [batch] shape
:param ignore_target_index: remove all samples where the target matches this index
:param ignore_prediction_index: if the network predicts this index, choose the next most-likely prediction instead
"""
# remove all occurrences where the target equals the ignore index
if ignore_target_index >= 0:
to_use = targets != ignore_target_index
logits = [lg[to_use] for lg in logits]
targets = targets[to_use]
# prevent logits from predicting an ignored class
if ignore_prediction_index >= 0:
new_logits = [lg.clone().detach_() for lg in logits]
for lg in new_logits:
min_ = lg.min(axis=1).values
lg[:, ignore_prediction_index] = min_
logits = new_logits
return logits, targets
def get_accumulated_stats(self, key: str) -> {str: torch.Tensor}:
""" get the averaged statistics for a specific key """
return {}
def eval_accumulated_stats(self, save_dir: str, key: str, prefix="", epoch: int = None, stats: dict = None) -> dict:
"""
visualize/log this metric
:param save_dir: if stats are visualized, where to save them
:param key: key to log
:param prefix: string prefix added in front of each dict key
:param epoch: optional int
:param stats: {str: tensor} or {str: [tensor]}
:return: usually empty dict if stats are visualized, otherwise the result of accumulating the stats
"""
return {}
def reset(self, key: str = None):
""" reset tracked stats for a specific key, or all (if key == None) """
pass
def on_epoch_start(self, epoch: int, is_last=False):
pass
def evaluate(self, net: AbstractNetwork,
inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor, key: str) -> {str: ResultValue}:
"""
:param net: evaluated network
:param inputs: network inputs
:param logits: network outputs
:param targets: output targets
:param key: prefix for the dict keys, e.g. "train" or "test"
:return: dictionary of string keys with corresponding results
"""
raise NotImplementedError
def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor,
logits: [torch.Tensor], targets: torch.Tensor) -> {str: ResultValue}:
"""
:param net: evaluated network
:param inputs: network inputs
:param logits: network outputs
:param targets: output targets
:return: dictionary of string keys with corresponding results
"""
raise NotImplementedError
class AbstractLogMetric(AbstractMetric):
"""
A metric that is logged epoch-wise to the output stream and loggers (e.g. tensorboard),
all single results of _evaluate() are weighted averaged later, by how the batch sizes of each single result
"""
def get_log_name(self) -> str:
raise NotImplementedError
def evaluate(self, net: AbstractNetwork,
inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor, key: str) -> {str: ResultValue}:
"""
:param net: evaluated network
:param inputs: network inputs
:param logits: network outputs
:param targets: output targets
:param key: prefix for the dict keys, e.g. "train" or "test"
:return: dictionary of string keys with corresponding results
"""
with torch.no_grad():
cur = self._evaluate(net, inputs, logits, targets)
cur = {k: v.unsqueeze() for k, v in cur.items()}
return self._to_dict(key, "", self.get_log_name(), cur)
def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor,
logits: [torch.Tensor], targets: torch.Tensor) -> {str: ResultValue}:
"""
:param net: evaluated network
:param inputs: network inputs
:param logits: network outputs
:param targets: output targets
:return: dictionary of string keys with corresponding results
"""
raise NotImplementedError
class AbstractAccumulateMetric(AbstractMetric):
"""
A metric that accumulates stats first
"""
def __init__(self, head_weights: list, each_epochs=-1, **kwargs):
super().__init__(head_weights, **kwargs)
self.stats = defaultdict(dict)
self.each_epochs = each_epochs
self.is_active = False
def get_log_name(self) -> str:
raise NotImplementedError
@classmethod
def _combine_tensors(cls, dict_key: str, tensors: [torch.Tensor]) -> torch.Tensor:
""" how to combine tensors if they are gathered from distributed training or from different batches """
return sum(tensors)
@classmethod
def args_to_add(cls, index=None) -> [Argument]:
""" list arguments to add to argparse when this class (or a child class) is chosen """
return super().args_to_add(index) + [
Argument('each_epochs', default=-1, type=int, help='visualize each n epochs, only last if <=0'),
]
def reset(self, key: str = None):
""" reset tracked stats for a specific key, or all (if key == None) """
keys = [key] if isinstance(key, str) else list(self.stats.keys())
for k in keys:
self.stats[k].clear()
def on_epoch_start(self, epoch: int, is_last=False):
self.reset(key=None)
self.is_active = is_last or ((self.each_epochs > 0) and ((epoch + 1) % self.each_epochs == 0))
def evaluate(self, net: AbstractNetwork,
inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor, key: str) -> {str: torch.Tensor}:
"""
:param net: evaluated network
:param inputs: network inputs
:param logits: network outputs
:param targets: output targets
:param key: prefix for the dict keys, e.g. "train" or "test"
:return: dictionary of string keys with corresponding [scalar] tensors
"""
if not self.is_active:
return {}
with torch.no_grad():
cur = self._evaluate(net, inputs, logits, targets)
# add all values to current stat dict
for k, v in cur.items():
if k in self.stats[key]:
self.stats[key][k] = self._combine_tensors(k, [self.stats[key][k], v.value])
else:
self.stats[key][k] = v.value
return {}
def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor,
logits: [torch.Tensor], targets: torch.Tensor) -> {str: ResultValue}:
"""
:param net: evaluated network
:param inputs: network inputs
:param logits: network outputs
:param targets: output targets
:return: dictionary of string keys with corresponding results
"""
raise NotImplementedError
def get_accumulated_stats(self, key: str) -> {str: torch.Tensor}:
""" get the averaged statistics for a specific key """
return self.stats.get(key, {})
def eval_accumulated_stats(self, save_dir: str, key: str, prefix="", epoch: int = None, stats: dict = None) -> dict:
"""
visualize/log this metric
:param save_dir: if stats are visualized, where to save them
:param key: key to log
:param prefix: string prefix added in front of each dict key
:param epoch: optional int
:param stats: {str: tensor} or {str: [tensor]}
:return: usually empty dict if stats are visualized, otherwise the result of accumulating the stats
"""
if stats is None:
stats = self.get_accumulated_stats(key)
else:
with torch.no_grad():
stats = {k: self._combine_tensors(k, v) if isinstance(v, list) else v for k, v in stats.items()}
if len(stats) > 0:
if isinstance(epoch, int):
save_dir = '%s/epoch_%d/' % (save_dir, epoch)
self._viz_stats(save_dir, key, prefix, stats)
return self._to_dict(key, prefix, self.get_log_name(), self._compute_stats(save_dir, key, stats))
return {}
def _compute_stats(self, save_dir: str, key: str, stats: dict) -> dict:
""" compute this metric """
return {}
def _viz_stats(self, save_dir: str, key: str, prefix: str, stats: dict):
""" visualize this metric """
pass
| 2.578125 | 3 |
pymux/panes.py | jonathanslenders/old-pymux | 0 | 12795797 | <reponame>jonathanslenders/old-pymux
from libpymux.panes import ExecPane
import os
class BashPane(ExecPane):
def __init__(self, pane_executor, pymux_pane_env):
super().__init__(pane_executor)
self._pymux_pane_env = pymux_pane_env
def _do_exec(self):
os.environ['PYMUX_PANE'] = self._pymux_pane_env
os.execv('/bin/bash', ['bash'])
| 2.3125 | 2 |
LargeSample/BatchFiles/run_relax.py | jJosephM/SkyrmionDome | 0 | 12795798 | import subprocess
path_oommf = 'C:/Users/jmank/Desktop/oommf12b4_20200930_86_x64/oommf/oommf.tcl'
mif_file = 'C:/Users/jmank/Desktop/oommf12b4_20200930_86_x64/oommf/Skyrmion/skyrmionDome2.mif'
length = 2
param_string = ' boxsi '  # optionally: '-parameters "integer_length %s"' % length
threads_string = ' -threads 28 '
oommf_command = 'tclsh ' + path_oommf + param_string + threads_string + mif_file
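# The assembled command looks roughly like (paths are machine-specific):
#   tclsh .../oommf.tcl boxsi -threads 28 .../skyrmionDome2.mif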
subprocess.call(oommf_command, shell=True)
| 1.71875 | 2 |
src/tanuki/data_backend/data_backend.py | M-J-Murray/tanuki | 0 | 12795799 | from __future__ import annotations
from abc import abstractclassmethod, abstractmethod, abstractproperty
from typing import (
TYPE_CHECKING,
Any,
Generator,
Generic,
Optional,
Type,
TypeVar,
Union,
)
import numpy as np
from pandas import DataFrame
from tanuki.data_store.data_type import DataType
from tanuki.database.data_token import DataToken
if TYPE_CHECKING:
from tanuki.data_store.index.index import Index
from tanuki.data_store.index.index_alias import IndexAlias
from tanuki.data_store.query import Query
B = TypeVar("B", bound="DataBackend")
class LocIndexer(Generic[B]):
@abstractmethod
def __getitem__(self, item: Union[int, list, slice]) -> B:
raise NotImplementedError()
class ILocIndexer(Generic[B]):
@abstractmethod
def __getitem__(self, item: Union[Any, list, slice]) -> B:
raise NotImplementedError()
class DataBackend:
@abstractmethod
def is_link(self: B) -> bool:
raise NotImplementedError()
@abstractmethod
def link_token(self: B) -> Optional[DataToken]:
raise NotImplementedError()
@abstractmethod
def to_pandas(self) -> DataFrame:
raise NotImplementedError()
@abstractproperty
def columns(self) -> list[str]:
raise NotImplementedError()
@abstractproperty
def values(self) -> np.ndarray:
raise NotImplementedError()
@abstractproperty
def dtypes(self) -> dict[str, DataType]:
raise NotImplementedError()
@abstractmethod
def cast_columns(self, column_dtypes: dict[str, type]) -> DataBackend:
raise NotImplementedError()
@abstractmethod
def to_dict(self, orient) -> dict[str, any]:
raise NotImplementedError()
@abstractproperty
def index(self) -> Index:
raise NotImplementedError()
@abstractproperty
def index_name(self) -> Union[str, list[str]]:
raise NotImplementedError()
@abstractproperty
def loc(self: B) -> LocIndexer[B]:
raise NotImplementedError()
@abstractproperty
def iloc(self: B) -> ILocIndexer[B]:
raise NotImplementedError()
@abstractmethod
def equals(self, other: Any) -> bool:
raise NotImplementedError()
@abstractmethod
def __eq__(self, other: Any) -> DataFrame:
raise NotImplementedError()
@abstractmethod
def __ne__(self, other: Any) -> DataFrame:
raise NotImplementedError()
@abstractmethod
def __gt__(self, other: Any) -> DataFrame:
raise NotImplementedError()
@abstractmethod
def __ge__(self, other: Any) -> DataFrame:
raise NotImplementedError()
@abstractmethod
def __lt__(self, other: Any) -> DataFrame:
raise NotImplementedError()
@abstractmethod
def __le__(self, other: Any) -> DataFrame:
raise NotImplementedError()
@abstractmethod
def __len__(self) -> int:
raise NotImplementedError()
@abstractmethod
def __iter__(self) -> Generator[str, None, None]:
raise NotImplementedError()
@abstractmethod
def iterrows(self) -> Generator[tuple[int, B], None, None]:
raise NotImplementedError()
@abstractmethod
def itertuples(self, ignore_index: bool = False) -> Generator[tuple, None, None]:
raise NotImplementedError()
@abstractmethod
def __getitem__(self, item: Union[str, list[bool]]) -> Any:
raise NotImplementedError()
@abstractmethod
def getitems(self, item: list[str]) -> B:
raise NotImplementedError()
@abstractmethod
def getmask(self, mask: list[bool]) -> B:
raise NotImplementedError()
@abstractmethod
def query(self, query: Query) -> B:
raise NotImplementedError()
@abstractmethod
def __setitem__(self, item: str, value: Any) -> None:
raise NotImplementedError()
@abstractmethod
def get_index(self, index_alias: IndexAlias) -> Index:
raise NotImplementedError()
@abstractmethod
def set_index(self: B, index: Union[Index, IndexAlias]) -> B:
raise NotImplementedError()
@abstractmethod
def reset_index(self: B) -> B:
raise NotImplementedError()
@abstractmethod
def append(self: B, new_backend: B, ignore_index: bool = False) -> B:
raise NotImplementedError()
@abstractmethod
def drop_indices(self: B, indices: list[int]) -> B:
raise NotImplementedError()
@abstractclassmethod
def concat(cls: Type[B], all_backends: list[B], ignore_index: bool = False) -> B:
raise NotImplementedError()
@abstractmethod
def nunique(self: B) -> int:
raise NotImplementedError()
@abstractmethod
def __str__(self: B) -> str:
raise NotImplementedError()
@abstractmethod
def __repr__(self: B) -> str:
raise NotImplementedError()
| 2.546875 | 3 |
runBowtie2.py | TaliaferroLab/AnalysisScripts | 0 | 12795800 | <gh_stars>0
#python3
import os
import subprocess
samples = ['CAD_Neurite_Rep1_S68', 'CAD_Neurite_Rep2_S69', 'CAD_Neurite_Rep3_S70', 'CAD_Neurite_Rep4_S71', 'CAD_Neurite_Rep5_S72',
'CAD_Soma_Rep1_S63', 'CAD_Soma_Rep2_S64', 'CAD_Soma_Rep3_S65', 'CAD_Soma_Rep4_S66', 'CAD_Soma_Rep5_S67',
'N2A_Neurite_Rep1_S58', 'N2A_Neurite_Rep2_S59', 'N2A_Neurite_Rep3_S60', 'N2A_Neurite_Rep4_S61', 'N2A_Neurite_Rep5_S62',
'N2A_Soma_Rep1_S53', 'N2A_Soma_Rep2_S54', 'N2A_Soma_Rep3_S55', 'N2A_Soma_Rep4_S56', 'N2A_Soma_Rep5_S57']
samplenames = ['CADNeuriteRep1', 'CADNeuriteRep2', 'CADNeuriteRep3', 'CADNeuriteRep4', 'CADNeuriteRep5',
'CADSomaRep1', 'CADSomaRep2', 'CADSomaRep3', 'CADSomaRep4', 'CADSomaRep5',
'N2ANeuriteRep1', 'N2ANeuriteRep2', 'N2ANeuriteRep3', 'N2ANeuriteRep4', 'N2ANeuriteRep5',
'N2ASomaRep1', 'N2ASomaRep2', 'N2ASomaRep3', 'N2ASomaRep4', 'N2ASomaRep5']
readdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/RawReads/trimmed'
indexfile = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments/Bowtie2Index/mm10oligos'
outputdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments'
for idx, sample in enumerate(samples):
print('Aligning {0}, sample {1} of {2}...'.format(sample, idx + 1, len(samples)))
forreads = os.path.join(readdir, '{0}.R1.trimmed.fq.gz'.format(sample))
revreads = os.path.join(readdir, '{0}.R2.trimmed.fq.gz'.format(sample))
samname = os.path.join(outputdir, samplenames[idx] + '.sam')
statsout = os.path.join(outputdir, '{0}.bowtiestats.txt'.format(samplenames[idx]))
command = ['bowtie2', '-q', '--end-to-end', '--fr', '--no-discordant', '--no-unal', '-p', '8', '-x', indexfile, '-1', forreads, '-2', revreads, '-S', samname]
with open(statsout, 'w') as outfh:
subprocess.call(command, stderr = outfh) | 1.59375 | 2 |
dit/multivariate/secret_key_agreement/no_communication.py | leoalfonso/dit | 1 | 12795801 | """
Secret Key Agreement Rate when communication is not permitted.
"""
from .. import gk_common_information
from ...utils import unitful
__all__ = [
'no_communication_skar',
]
@unitful
def no_communication_skar(dist, rv_x, rv_y, rv_z, rv_mode=None):
"""
The rate at which X and Y can agree upon a key with Z eavesdropping,
and no public communication.
Parameters
----------
dist : Distribution
The distribution of interest.
rv_x : iterable
The indices to consider as the X variable, Alice.
rv_y : iterable
The indices to consider as the Y variable, Bob.
rv_z : iterable
The indices to consider as the Z variable, Eve.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If
equal to 'names', the the elements are interpreted as random
variable names. If `None`, then the value of `dist._rv_mode` is
consulted, which defaults to 'indices'.
Returns
-------
skar : float
The no-communication secret key agreement rate.
"""
return gk_common_information(dist, [rv_x, rv_y], rv_z, rv_mode=rv_mode)
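# Illustrative usage sketch. Assumptions: the dit package is installed and its
# example distributions are importable; the chosen distribution is arbitrary.
#     from dit.example_dists import giant_bit
#     d = giant_bit(3, 2)
#     print(no_communication_skar(d, [0], [1], [2]))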
| 3.15625 | 3 |
scripts/auth-server.py | cazz0059/stmonitor | 6 | 12795802 | SERVER_HOST = '127.0.0.1'
SERVER_PORT = 1335
MAX_GET_REQUESTS = 10
import re, socket
import random
import string
MSG_AUTH_RE = re.compile('''^AUTH +(.+) +(.+)''')
MSG_GET_RE = re.compile('''^GET +(.+) +(.+)''')
MSG_RVK_RE = re.compile('''^RVK +(.+)''')
def serve(srv):
while 1:
print('[S] Waiting for new connections')
(s, address) = srv.accept()
print('[S] New connection from', address)
handle_connection(s)
print('[S] Closing connection')
s.close()
def handle_connection(s):
print('[S] Waiting for request')
auth = False
while (not auth):
req = s.recv(1024).decode().strip()
print('[S] Received: ' + req)
m = MSG_AUTH_RE.match(req)
if (m is not None):
letters = string.ascii_letters
token = ''.join(random.choice(letters) for i in range(20))
reply = 'SUCC ' + token
print('[S] Replying: ', reply)
s.sendall(str.encode(reply + '\n'))
auth = True
getRequests = 0
while(auth):
req = s.recv(1024).decode().strip()
print('[S] Received: ' + req)
m_get = MSG_GET_RE.match(req)
m_rvk = MSG_RVK_RE.match(req)
if (m_get is not None):
if (getRequests < MAX_GET_REQUESTS):
reply = 'RES content'
print('[S] Replying: ', reply)
s.sendall(str.encode(reply + '\n'))
getRequests += 1
else:
reply = 'TIMEOUT'
print('[S] Replying: ', reply)
s.sendall(str.encode(reply + '\n'))
auth = False
elif (m_rvk is not None):
auth = True
break
else:
print('[S] Invalid message')
if (__name__ == '__main__'):
# SERVER_PORT = int(argv[1])
print('[S] Auth server starting. Press Ctrl+C to quit')
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Avoid TIME_WAIT
srv.bind((SERVER_HOST, SERVER_PORT))
print('[S] Listening on ', SERVER_HOST, SERVER_PORT)
srv.listen(8)
serve(srv)
srv.close() | 2.78125 | 3 |
utils/bundle/client.py | gravitationalwavedc/gwcloud_job_client | 0 | 12795803 | <filename>utils/bundle/client.py<gh_stars>0
"""
Adapted from https://gist.github.com/grantjenks/095de18c51fa8f118b68be80a624c45a
"""
import http.client
import socket
import xmlrpc.client
class UnixStreamHTTPConnection(http.client.HTTPConnection):
def connect(self):
self.sock = socket.socket(
socket.AF_UNIX, socket.SOCK_STREAM
)
self.sock.connect(self.host)
class UnixStreamTransport(xmlrpc.client.Transport, object):
def __init__(self, socket_path):
self.socket_path = socket_path
super().__init__()
def make_connection(self, host):
return UnixStreamHTTPConnection(self.socket_path)
class UnixStreamXMLRPCClient(xmlrpc.client.ServerProxy):
def __init__(self, addr, **kwargs):
transport = UnixStreamTransport(addr)
super().__init__(
"http://", transport=transport, **kwargs
)
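# Minimal usage sketch. Assumptions: an XML-RPC server such as supervisord is
# listening on the given unix socket; the socket path and RPC call below are
# illustrative, not part of this module.
if __name__ == "__main__":
    client = UnixStreamXMLRPCClient("/var/run/supervisor.sock")
    print(client.supervisor.getState())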
| 2.15625 | 2 |
src/state.py | omegagussan/green-sally | 0 | 12795804 | #!/usr/bin/python3
import json
state_file_path = "resources/state.json"
version = 0
class StateVersionException(Exception):
pass
def _get_state():
with open(state_file_path, 'r') as state_file:
data = json.load(state_file)
if data['version'] == 0:
data.pop('version', None)
return data
else:
raise StateVersionException(f"No logic to parse state with version: {version} implemented")
def set_state(d):
dc = d.copy()
dc['version'] = version
with open(state_file_path, 'w') as state_file:
json.dump(dc, state_file, sort_keys=True, indent=4)
class State:
def __init__(self):
        self.state = _get_state()
def set_state_value(self, key, value):
self.state[key] = value
def get_value(self, key):
return self.state[key]
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        set_state(self.state)
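# Illustrative usage sketch. Assumptions: resources/state.json exists with a
# supported version field; the key and value below are made up.
#     with State() as s:
#         s.set_state_value("last_run", "2021-10-18")
#     # the state is written back to disk when the block exits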
| 2.875 | 3 |
tests/test_utils.py | MarinFCM/rain_alert | 1 | 12795805 | from rain_alert.utils import RECEIVERS_FILE_PATH
import unittest
from .context import utils
import os
class TestGetReceivers(unittest.TestCase):
def test_no_file(self):
        # point the receivers path at this folder, because the tests folder
        # doesn't contain the receivers file
utils.RECEIVERS_FILE_PATH = os.path.split(utils.RECEIVERS_FILE_PATH)[-1]
self.assertTrue(True)
"""
class TestGetCredentials(unittest.TestCase):
...
"""
if __name__ == '__main__':
unittest.main()
| 2.40625 | 2 |
tpop/projet1/justine.py | justinemajor/gph | 0 | 12795806 | import numpy as np
import matplotlib.pyplot as plt
noir = [71, 69, 69, 70, 73, 70, 75, 75, 74, 72, 72, 72, 67, 69, 76, 76, 77, 77, 74, 72, 79, 79, 71, 71, 75, 74, 74, 73, 72, 73, 74]
bleu = [73, 72, 73, 71, 71, 76, 77, 74, 74, 73, 82, 79, 79, 70, 72, 72, 74, 72 ,72, 70, 70, 73, 71, 73, 72, 72, 73, 72, 71, 71, 70]
orange = [73, 72, 71, 75, 72, 75, 70, 70, 70, 73, 74, 72, 73, 73, 72, 72, 72, 71, 71, 72, 69, 69, 74, 71, 71, 76, 76, 73, 73, 84, 68]
"""moyNoir = np.round(np.mean(noir), 2)
moyBleu = np.round(np.mean(bleu), 2)
moyOrange = np.round(np.mean(orange), 2)
print(moyNoir, moyBleu, moyOrange)
stdNoir = np.round(np.std(noir), 2)
stdBleu = np.round(np.std(bleu), 2)
stdOrange = np.round(np.std(orange), 2)
print(stdNoir, stdBleu, stdOrange)""" | 3.015625 | 3 |
perfil/migrations/0002_auto_20211018_0921.py | Felipe-007/Ecommerce | 0 | 12795807 | # Generated by Django 3.2.7 on 2021-10-18 12:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('perfil', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='perfil',
name='endereco',
field=models.CharField(max_length=50, verbose_name='Endereço'),
),
migrations.AlterField(
model_name='perfil',
name='numero',
field=models.CharField(max_length=5, verbose_name='Número'),
),
migrations.AlterField(
model_name='perfil',
name='usuario',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Usuário'),
),
]
| 1.539063 | 2 |
boardroom2.py | robscallsign/HypeMan | 11 | 12795808 | <reponame>robscallsign/HypeMan
import gspread
import json
import os
import time
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib.table import Table
from matplotlib.font_manager import FontProperties
import numpy as np
import statistics
import sys, getopt
from oauth2client.service_account import ServiceAccountCredentials
#from datetime import datetime
#print ('Number of arguments:', len(sys.argv), 'arguments.')
#print ('Argument List:', str(sys.argv))
#if len(sys.argv)== 2:
# print('Argument Number 2: ', str(sys.argv[1]))
def updateDatabase(path):
    if not os.path.isfile(path) or time.time() - getModificationTimeSeconds(path) > 3600:
print('Updating from Google.')
updateFromGoogle()
else:
print('Less than one hour since last refresh, skipping pull from google.')
def updateFromGoogle():
try:
scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('HypeManLSO-358d4493fc1d.json', scope)
client = gspread.authorize(creds)
sheet = client.open('HypeMan_LSO_Grades').sheet1
list_of_hashes = sheet.get_all_records()
with open('data.txt', 'w') as outfile:
json.dump(list_of_hashes, outfile)
except:
print('Exception thrown in updateFromGoogle')
return
print('Local HypeMan LSO grade database updated from Google Sheets.')
def getModificationTimeSeconds(path):
ct = time.time()
try:
modification_time = os.path.getmtime(path)
#print("Last modification time since the epoch:", modification_time)
except OSError:
print("Path '%s' does not exists or is inaccessible" %path)
return ct-4000
return modification_time
def calculateGradeCivilian(curList):
gradeCell = {}
gradeCell['score'] = -1
gradeCell['icon'] = ''
gradeCell['bg'] = '#FFFFFF'
pt = float(-1.0)
for i in curList:
if i['case'] == 3 and not '3' in gradeCell['icon']:
gradeCell['icon'] += '3'
if i['case'] == 2 and not '2' in gradeCell['icon']:
gradeCell['icon'] += '2'
try:
tmp = float(i['points'])
if tmp > pt:
pt = tmp
if tmp == 5 and not '5' in gradeCell['icon']:
gradeCell['icon']+= '5'
except:
pt=0
gradeCell['bg'] = colorFromPoints(pt)
gradeCell['score'] = pt
# if not gradeCell['score']:
# print('what')
return gradeCell
def colorFromPoints(g):
bluegraycolor = '#708286'
glossgraycolor = '#5F615E'
redcolor = '#ED1B24'
browncolor = '#835C3B'
orangecolor = '#d17a00'
yellowcolor = '#b6c700'
greencolor = '#0bab35'
bluecolor = '#01A2EA'
blankcell='#FFFFFF'
blackcolor = '#000000'
    color = blankcell
if g == -1:
color=bluegraycolor
elif g == 0:
color=blackcolor
elif g == 1:
color=redcolor
elif g == 2.0:
color=browncolor
elif g == 2.5:
color=bluecolor
elif g == 3.0:
color = yellowcolor
elif g == 4.0:
color = greencolor
elif g == 4.5:
color = greencolor
elif g == 5:
color = greencolor
elif g == 5.5:
color = bluegraycolor
else:
color = blankcell
return color
def calculateGradeTailhooker(curList):
# loop through their grades and find their FIRST wire
gradeCell = {}
gradeCell['score'] = 1
gradeCell['icon'] = ''
gradeCell['bg'] = '#FFFFFF'
pts = []
count = 0
for i in curList:
count = count + 1
#print(' Calculate grade iteration: ', count)
# skip WOFDS
if 'WOFD' in i['grade']:
continue
if not i['wire']:
#print('Empty.')
pts.append(i['points'])
else:
#print('not empty')
if not i['finalscore']:
gradeCell['score'] = i['points']
else:
gradeCell['score'] = i['finalscore']
pts.append(i['points'])
gradeCell['bg'] = colorFromPoints(min(pts))
if i['case'] == 3:
gradeCell['icon']+='3'
return gradeCell
if len(pts) == 0:
gradeCell['score'] = 1
pts.append(1)
else:
gradeCell['score'] = statistics.mean(pts)
gradeCell['bg'] = colorFromPoints(min(pts))
return gradeCell
def calculateGrade(curList, ruleset):
if ruleset == 'best':
return calculateGradeCivilian(curList)
if ruleset == 'first':
return calculateGradeTailhooker(curList)
def calculatePilotRow(data, name, ruleset):
#print(name)
boardRow = [];
uniqueDates = []
for i in reversed(data):
#grade = grade0
if name == i['pilot']:
if i['ServerDate'] not in uniqueDates:
uniqueDates.append(i['ServerDate'])
for i in uniqueDates:
#print(i)
curList = [];
for j in data:
if name == j['pilot'] and j['ServerDate'] == i:
curList.append(j)
ithPilotGrade = calculateGrade(curList,ruleset)
boardRow.append(ithPilotGrade)
# if not haveDate:
# curDate = i['ServerDate']
# haveDate = True
#
# if curDate == i['ServerDate']:
# curList.append(i)
#
# else:
# curDate = i['ServerDate']
# grade = calculateGrade(curList, grade0)
# boardRow.append(grade)
# curList = [];
# curList.append(i)
#print(boardRow)
return boardRow
def CalculateAverageScore(pilotRow):
score = 0.0
for i in pilotRow:
score = score + i['score']
finalscore = score/len(pilotRow)
#print(finalscore)
return finalscore
def plotSquadron(pilotRows, options):
#print('PlotSquadron')
maxLength = 0
for i in pilotRows:
if len(i) > maxLength:
maxLength = len(i)
if maxLength < options['maxRows']:
maxLength = options['maxRows']
fig = plt.figure(figsize=(6, 3), dpi=250)
ax = fig.add_subplot(1,1,1)
frame1 = plt.gca()
frame1.axes.get_xaxis().set_ticks([])
frame1.axes.get_yaxis().set_ticks([])
tb = Table(ax, bbox=[0, 0, 1, 1])
#tb.scale(0.25, 1)
tb.auto_set_font_size(False)
n_cols = maxLength+2
n_rows = len(pilots)+1
width, height = 100 / n_cols, 100.0 / n_rows
#height = height/10
shithot ='🎖️'
anchor='⚓'
goldstar = '⭐'
goldstar = '★'
case3 = '•'
case3= '◉'
case2 = '⊙'
case2 = '○'
#case2 = '○'
#case2 = '∘'
#unicorn='✈️'
blankcell='#FFFFFF'
#colors=['red','orange','orange','yellow','lightgreen'] #078a21
#colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718']
colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718']
# redcolor = '#a00000'
# browncolor = '#835C3B'
# orangecolor = '#d17a00'
# yellowcolor = '#b6c700'
# greencolor = '#0bab35'
# bluecolor = '#01A2EA'
#try:
# minDate = data[-1]['ServerDate']
# maxDate = data[0]['ServerDate']
#except:
# minDate =''
# maxDate = ''
textcolor = '#000000'
edgecolor = '#708090'
cell = tb.add_cell(0,0,10*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none'
cell.get_text().set_color(textcolor)
cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8))
cell.set_edgecolor(edgecolor)
cell.set_linewidth(0.5)
#cell.set_fontsize(24)
cell = tb.add_cell(0,1,width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none'
cell.get_text().set_color(textcolor)
cell.set_edgecolor(edgecolor)
cell.set_linewidth(0.5)
cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8))
cell.set_edgecolor(edgecolor)
#cell.set_fontsize(24)
currentMonthSQ = datetime.now().month
titlestr = ' '+options['squadron']
count = 0
for col_idx in range(2,options['maxCols']+2):
text = ''
if count < len(titlestr):
text = titlestr[count]
count = count + 1
cell = tb.add_cell(0, col_idx, width, height,
text=text.upper(),
loc='center',
facecolor=blankcell)
cell.set_linewidth(0.5)
cell.set_edgecolor(edgecolor)
cell.get_text().set_color(textcolor)
cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8))
cell.set_edgecolor(edgecolor)
#cell.set_text_props(family='')
#titlestr = 'JOW Greenie Board ' + minDate + ' to ' + maxDate
minRows = len(pilots)
if minRows < options['maxRows']:
minRows = options['maxRows']
#for p_idx in range(0,len(pilots)):
for p_idx in range(0,minRows):
row_idx = p_idx+1
rd = []
name = ''
scoreText = ''
if p_idx < len(pilots):
name = pilots[p_idx]
rd = pilotRows[name]
# avg = statistics.mean(rd)
avg = CalculateAverageScore(rd)
scoreText = round(avg,1)
cell = tb.add_cell(row_idx,0,10*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none'
cell.get_text().set_color(textcolor)
cell.set_text_props(fontproperties=FontProperties(weight='bold',size="7"))
cell.set_edgecolor(edgecolor)
cell.set_linewidth(0.5)
# name = pilots[p_idx];
cell = tb.add_cell(row_idx,1,width,height,text=scoreText,loc='center',facecolor=blankcell)
cell.get_text().set_color(textcolor)
cell.set_text_props(fontproperties=FontProperties(size="6.0"))
cell.set_edgecolor(edgecolor)
cell.set_linewidth(0.5)
col_idx = 2
for ij in rd:
color = ij['bg']
if not color:
color = blankcell
text = ''
if '3' in ij['icon']:
text = case3
elif '2' in ij['icon']:
text = case2
cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none'
cell.get_text().set_color('#333412')
cell.set_linewidth(0.5)
# cell.auto_set_font_size()
cell.set_text_props(fontproperties=FontProperties(weight='bold',size="10"))
cell.set_edgecolor(edgecolor)
col_idx = col_idx + 1
color = blankcell
text=''
# add the remaining cells to the end
for f in range(col_idx,options['maxCols']+2):
cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none'
cell.set_linewidth(0.5)
cell.set_edgecolor(edgecolor)
#
#tb.set_fontsize(7)
ax.add_table(tb)
ax.set_axis_off()
ax.axis('off')
plt.box(False)
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
#plt.title(titlestr,color='w')
plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0)
def plotDefaultBoard(pilotRows, options):
maxLength = 0
    for i in pilotRows.values():
if len(i) > maxLength:
maxLength = len(i)
if maxLength < 17:
maxLength = 17
fig = plt.figure(dpi=150)
ax = fig.add_subplot(1,1,1)
frame1 = plt.gca()
frame1.axes.get_xaxis().set_ticks([])
frame1.axes.get_yaxis().set_ticks([])
tb = Table(ax, bbox=[0, 0, 1, 1])
tb.auto_set_font_size(False)
n_cols = maxLength+2
n_rows = len(pilots)+1
width, height = 100 / n_cols, 100.0 / n_rows
shithot ='🎖️'
anchor='⚓'
goldstar = '⭐'
goldstar = '★'
case3 = '•'
case3= '◉'
case2 = '⊙'
case2 = '○'
#case2 = '○'
#case2 = '∘'
#unicorn='✈️'
blankcell='#1A392A'
#colors=['red','orange','orange','yellow','lightgreen'] #078a21
#colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718']
colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718', '#708286','#5F615E']
redcolor = '#a00000'
bluegraycolor = '#708286'
glossgraycolor = '#5F615E'
browncolor = '#835C3B'
orangecolor = '#d17a00'
yellowcolor = '#b6c700'
greencolor = '#0bab35'
# try:
# minDate = data[-1]['ServerDate']
# maxDate = data[0]['ServerDate']
# except:
# minDate =''
# maxDate = ''
textcolor = '#FFFFF0'
edgecolor = '#708090'
cell = tb.add_cell(0,0,8*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none'
cell.get_text().set_color(textcolor)
cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8))
cell.set_edgecolor(edgecolor)
#cell.set_fontsize(24)
cell = tb.add_cell(0,1,2*width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none'
cell.get_text().set_color(textcolor)
cell.set_edgecolor(edgecolor)
cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8))
cell.set_edgecolor(edgecolor)
#cell.set_fontsize(24)
currentMonth = datetime.now().month
titlestr = ' JOINT OPS WING' #+ str(currentMonth) + '/' + str(datetime.now().year)
print(titlestr)
count = 0
for col_idx in range(2,maxLength+2):
text = ''
if count < len(titlestr):
text = titlestr[count]
count = count + 1
cell = tb.add_cell(0, col_idx, width, height,
text=text,
loc='center',
facecolor=blankcell)
cell.set_edgecolor(edgecolor)
cell.get_text().set_color(textcolor)
cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8))
cell.set_edgecolor(edgecolor)
#cell.set_text_props(family='')
# titlestr = 'JOW Greenie Board ' + minDate + ' to ' + maxDate
minRows = len(pilots)
if minRows < 12:
minRows = 12
#for p_idx in range(0,len(pilots)):
for p_idx in range(0,minRows):
row_idx = p_idx+1
rd = []
name = ''
scoreText = ''
if p_idx < len(pilots):
name = pilots[p_idx]
rd = pilotRows[name]
#avg = statistics.mean(rd)
avg = CalculateAverageScore(rd)
scoreText = round(avg,1)
if name.lower() == 'eese':
name = "SippyCup"
cell = tb.add_cell(row_idx,0,8*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none'
cell.get_text().set_color(textcolor)
cell.set_text_props(fontproperties=FontProperties(weight='bold',size="7.5"))
cell.set_edgecolor(edgecolor)
# name = pilots[p_idx];
cell = tb.add_cell(row_idx,1,2*width,height,text=scoreText,loc='center',facecolor=blankcell)
cell.get_text().set_color(textcolor)
cell.set_text_props(fontproperties=FontProperties(weight='bold',size="7.4"))
cell.set_edgecolor(edgecolor)
col_idx = 2
for g in rd:
color = g['bg']
text = ''
if '5.5' in g['icon']:
text = shithot
elif '3' in g['icon'] and '5' in g['icon']:
text = goldstar
elif '3' in g['icon']:
text = case3
elif '5' in g['icon']:
text = anchor
elif '2' in g['icon']:
text = case2
cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none'
cell.get_text().set_color('#333412')
# cell.auto_set_font_size()
cell.set_text_props(fontproperties=FontProperties(weight='bold',size="14"))
cell.set_edgecolor(edgecolor)
col_idx = col_idx + 1
color = blankcell
text=''
# add the remaining cells to the end
for f in range(col_idx,maxLength+2):
cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none'
cell.set_edgecolor(edgecolor)
#tb.set_fontsize(7)
ax.add_table(tb)
ax.set_axis_off()
ax.axis('off')
plt.box(False)
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
#plt.title(titlestr,color='w')
plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0)
# set defaults
airframe = ''
squadron = ''
ruleset = 'best'
#print('Length of argv: ' , len(sys.argv));
if len(sys.argv) >= 2:
if str(sys.argv[1]) == 'turkey':
airframe = ['F-14B', 'F-14A-135-GR']
elif str(sys.argv[1]) == 'hornet':
airframe = 'FA-18C_hornet'
elif str(sys.argv[1]) == 'scooter':
airframe = 'A-4E-C'
elif str(sys.argv[1]) == 'harrier':
airframe = 'AV8BNA'
elif str(sys.argv[1]) == 'goshawk':
airframe = 'T-45'
print('Aircraft: ', airframe)
if len(sys.argv) >= 3:
ruleset = str(sys.argv[2])
if len(sys.argv) >= 4:
squadron = str(sys.argv[3]);
print('Squadron: ', squadron)
print('Ruleset: ', ruleset)
lsoData = 'data.txt'
updateDatabase(lsoData)
with open('data.txt') as json_file:
data = json.load(json_file)
# go through and keep only a specified airframe
data2 = data
print('... size of data array: ' , str(len(data)))
count = 0
if airframe != '':
data2 = []
print('Keeping only grades for airframe: ', airframe)
for i in data:
# if i['airframe']
#if i['airframe'] == airframe:
if i['airframe'] in airframe:
data2.append(i)
# print('Deleting airframe: ', i['airframe'], ' was looking for: ' , airframe)
# data.remove(i)
count = count + 1
print('Number of rows kept: ', str(count))
data = data2
print('size of data array: ' , str(len(data)))
count = 0
if squadron != '':
data2 = []
print('Searching for squadron: ' , squadron)
for i in data:
name = i['pilot']
#print('Name: ' , name)
name = name.replace('-', '')
name = name.replace('_', '')
name = name.replace('[', '')
name = name.replace(']', '')
name = name.replace('|', '')
name = name.replace('\\', '')
name = name.replace('/', '')
name = name.replace('@', '')
name = name.lower()
index = name.find(squadron)
if index != -1:
data2.append(i)
count = count + 1;
#print('Keeping in squadron: ' , name)
# name = name.replace(squadron,'')
# if the squadron was empty just keep the original data
data = data2
data2 = []
print('Skipping WOFDs')
for i in data:
    if 'WOFD' not in i['grade']:
data2.append(i)
data = data2
print('Number remaining: ', str(len(data)))
pilots = []
pilotRows = {}
pilotDict = {}
# get the rows as they will appear in our Greenie Board
# set the default grade
#grade0={}; grade0['color']='white'; grade0['score']=0.0; grade0['symbol']='x'; grade0['grade']='--'
# if squadron is empty then lets trim the landings not in the current month
data2 = []
if squadron == '':
currentMonth = datetime.now().month
print('skipping landings not in current month')
for i in data:
#print(i)
idate = i['ServerDate'].split('/')
imonth = int(idate[1])
if imonth == currentMonth:
data2.append(i)
data = data2
if squadron != '':
currentMonthSQ = datetime.now().month
print('skipping landings not in current month')
for i in data:
#print(i)
idate = i['ServerDate'].split('/')
imonth = int(idate[1])
if imonth == currentMonthSQ:
data2.append(i)
data = data2
for i in reversed(data):
name = i['pilot']
if name not in pilots:
pilots.append(name)
pilotRow = calculatePilotRow(data, name, ruleset)
#print(name,' score: ' , pilotRow)
pilotRows[name] = (pilotRow)
# pilotDict[name]=pilotRow
options = {}
if squadron == '':
plotDefaultBoard(pilotRows, options)
else:
options['airframe'] = airframe
options['squadron'] = squadron
options['ruleset'] = ruleset
options['maxRows']=10
options['maxCols']=17
plotSquadron(pilotRows, options)
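# Illustrative invocation (script and squadron names are placeholders):
#   python greenie_board.py hornet best vfa
# argv[1] selects the airframe keyword, argv[2] the ruleset ('best' or 'first'),
# and argv[3] an optional squadron substring; the rendered board is written to board.png.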
print('done') | 2.71875 | 3 |
cloudmesh/secchi/tensorflow/preprocessing/generate_tfrecord.py | cloudmesh/cloudmesh-secchi | 0 | 12795809 | <filename>cloudmesh/secchi/tensorflow/preprocessing/generate_tfrecord.py
import os
import io
import pandas as pd
import tensorflow as tf
import sys
sys.path.append("../../models/research")
from PIL import Image
from cloudmesh.secchi.tensorflow.utils_tf import dataset_util
from collections import namedtuple, OrderedDict
from cloudmesh.common.util import path_expand
class GenTF:
def __init__(self, str):
if str == 'train':
self.csv_input = os.path.join(path_expand('~/.cloudmesh/secchi/annotations'), 'train_labels.csv')
else:
self.csv_input = os.path.join(path_expand('~/.cloudmesh/secchi/annotations'), 'test_labels.csv')
self.img_path = os.path.join(path_expand('~/.cloudmesh/secchi/images'), str)
self.output_path = os.path.join(path_expand('~/.cloudmesh/secchi/annotations'), f"{str}.record")
self.label = 'disc'
def create(self):
writer = tf.io.TFRecordWriter(self.output_path)
print("csv_input: ", self.csv_input)
examples = pd.read_csv(self.csv_input)
grouped = self.split(examples, 'filename')
for group in grouped:
tf_example = self.create_tf_example(group, self.img_path)
writer.write(tf_example.SerializeToString())
writer.close()
print('Successfully created the TFRecords: {}'.format(self.output_path))
def class_text_to_int(self, row_label):
if row_label == self.label: # 'ship':
return 1
else:
            return None
def split(self, df, group):
data = namedtuple('data', ['filename', 'object'])
gb = df.groupby(group)
return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]
def create_tf_example(self, group, path):
with tf.io.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = Image.open(encoded_jpg_io)
width, height = image.size
filename = group.filename.encode('utf8')
image_format = b'jpg'
xmins = []
xmaxs = []
ymins = []
ymaxs = []
classes_text = []
classes = []
for index, row in group.object.iterrows():
xmins.append(row['xmin'] / width)
xmaxs.append(row['xmax'] / width)
ymins.append(row['ymin'] / height)
ymaxs.append(row['ymax'] / height)
classes_text.append(row['class'].encode('utf8'))
classes.append(self.class_text_to_int(row['class']))
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(filename),
'image/source_id': dataset_util.bytes_feature(filename),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
}))
return tf_example
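# Illustrative usage (not part of the original module; assumes the label CSVs and
# images already exist under ~/.cloudmesh/secchi):
#   GenTF('train').create()
#   GenTF('test').create()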
| 2.5 | 2 |
robustgp_experiments/demo1d.py | markvdw/RobustGP | 16 | 12795810 | import gpflow
import matplotlib.pyplot as plt
import numpy as np
from robustgp import ConditionalVariance
X = np.random.rand(150, 1)
Y = 0.8 * np.cos(10 * X) + 1.2 * np.sin(8 * X + 0.3) + np.cos(17 * X) * 1.2 + np.random.randn(*X.shape) * 0.1
gpr = gpflow.models.GPR((X, Y), gpflow.kernels.SquaredExponential())
opt = gpflow.optimizers.Scipy()
opt_logs = opt.minimize(gpr.training_loss, gpr.trainable_variables, options=dict(maxiter=100))
k = gpflow.kernels.SquaredExponential()
gpflow.utilities.multiple_assign(k, gpflow.utilities.read_values(gpr.kernel))
Z_initer = ConditionalVariance()
sp = gpflow.models.SGPR((X, Y), k, Z_initer.compute_initialisation(X, 6, k)[0])
gpflow.utilities.multiple_assign(sp, gpflow.utilities.read_values(gpr))
pX = np.linspace(0, 1, 3000)[:, None]
m, v = sp.predict_f(pX)
ipm, _ = sp.predict_f(sp.inducing_variable.Z.value())
fig, (ax1, ax2) = plt.subplots(2, 1)
ax1.plot(X, Y, 'x')
ax1.plot(pX, m)
ax1.plot(sp.inducing_variable.Z.value(), ipm, 'o', color='C3')
deviation = (2 * (v + sp.likelihood.variance.value()) ** 0.5).numpy().flatten()
ax1.fill_between(pX.flatten(), m.numpy().flatten() - deviation, m.numpy().flatten() + deviation, alpha=0.3)
ax1.axvline(pX[np.argmax(v)].item(), color='C2')
ax1.set_ylabel("y")
ax2.plot(pX, v ** 0.5)
ax2.plot(sp.inducing_variable.Z.value(), sp.inducing_variable.Z.value() * 0.0, 'o', color='C3')
ax2.axvline(pX[np.argmax(v)].item(), color='C2')
ax2.set_xlabel("input $x$")
ax2.set_ylabel("$\mathbb{V}\,[p(f(x) | \mathbf{u})]^{0.5}$")
plt.show()
| 2.125 | 2 |
main.py | patrickhisnibrataas/gitlab-to-github-migration | 0 | 12795811 | import sys
import time
from src.github import Github
from src.gitlab import Gitlab
def exit(message):
print(message)
sys.exit()
if __name__ == "__main__":
# Get all gitlab repositories
gitlab = Gitlab()
gitlab_repos = gitlab.repositories()
if gitlab_repos == None:
        exit('Not able to retrieve gitlab repositories')
elif gitlab_repos == dict():
exit('Zero repositories was fetched from the gitlab account')
print ('Gitlab repositories found: ' + str(len(gitlab_repos)))
# Get all github repositories
github = Github()
github_repos = github.repositories()
if github_repos == None:
        exit('Not able to retrieve github repositories')
print ('Github repositories found: ' + str(len(github_repos)))
# Skip repositories that already exists on github
for key in github_repos.keys():
alternativeKey = str(key).replace('-', ' ')
if key in gitlab_repos.keys():
gitlab_repos.pop(key)
            print(f'Repository "{key}" already exists on Github and will not be exported from gitlab')
if alternativeKey in gitlab_repos.keys():
gitlab_repos.pop(alternativeKey)
            print(f'Repository "{alternativeKey}" already exists on Github and will not be exported from gitlab')
for name, url in gitlab_repos.items():
name = str(name).replace(' ', '-')
print(f'Starting import of repository: {name}')
# Create repository that does not exist
if github.repositoryCreate(name, '') == None:
print(f'Unable to create repository: {name}')
continue
# Start import to repository
if github.importStart(url, name) == None:
exit(f'Unable to start import of "{url}" to github repo named "{name}"')
# Check if import is done
status = ''
previousStatus = ''
finishedStatus = [
'complete',
'auth_failed',
'error',
'detection_needs_auth',
'detection_found_nothing',
'detection_found_multiple',
None
]
while status not in finishedStatus:
status = github.importStatus(name)
if previousStatus != status:
print(f'Status: {status}')
previousStatus = status
if status == 'importing':
# Enable transfer of git lfs files
if github.getLargeFiles(name) == None:
exit(f'Unable to get list of git lfs files in repo: {name}')
if github.lfsPreference(name) == None:
exit(f'Unable to set git lfs preference on: {name}')
time.sleep(1)
if status != 'complete':
exit(f'Import of "{name}" to Github finished with status: {status}')
print(f'Import of "{name}" to Github finished with status: {status}')
| 2.90625 | 3 |
opennmt/layers/common.py | dblandan/OpenNMT-tf | 2 | 12795812 | <reponame>dblandan/OpenNMT-tf
"""Defines common layers."""
import tensorflow as tf
from tensorflow.python.framework import function
@function.Defun(
python_grad_func=lambda x, dy: tf.convert_to_tensor(dy),
shape_func=lambda op: [op.inputs[0].get_shape()])
def convert_gradient_to_tensor(x):
"""Wraps :obj:`x` to convert its gradient to a tensor."""
return x
def embedding_lookup(params, ids):
"""Wrapper around ``tf.nn.embedding_lookup``.
  This converts gradients of the embedding variable to tensors, which allows
  the use of optimizers that don't support sparse gradients (e.g. Adafactor).
Args:
params: The embedding tensor.
ids: The ids to lookup in :obj:`params`.
Returns:
A ``tf.Tensor``, the embeddings that correspond to :obj:`ids`.
"""
params = convert_gradient_to_tensor(params)
return tf.nn.embedding_lookup(params, ids)
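# Minimal usage sketch (assumes TF1-style graph mode; variable and tensor names are illustrative):
#   word_embs = tf.get_variable("w_embs", shape=[vocab_size, depth])
#   inputs = embedding_lookup(word_embs, word_ids)  # gradients flow as dense tensors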
| 2.5625 | 3 |
app/api/view.py | saury2013/Memento | 0 | 12795813 | # -*- coding: utf-8 -*-
from flask import make_response, request, jsonify
from flask_login import login_required
import json
from werkzeug.utils import secure_filename
import os
from app.models.fragment import Fragment
from app.models.branch import Branch
from app.models.tag import Tag
from app.api import api
from app.whoosh import search_helper
from app import base_dir
UPLOAD_FOLDER = 'static/resource/uploads/image/'
ALLOWED_EXTENSIONS = set(['bmp', 'webp', 'png', 'jpg', 'jpeg', 'gif'])
@api.route("/add_tag/", methods=['POST'])
@login_required
def add_tag():
# name = request.args.get('name', 0, type=int)
response = {"status": 500, "msg": "name is Null!"}
name = request.form['name']
if name != "":
tag = Tag.add(name)
if tag:
res = {"id": tag.id, "name": tag.name}
response['tag'] = res
response["status"] = 200
else:
response["msg"] = "tag has already exists!"
return make_response(json.dumps(response))
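# Example response for /add_tag/ (values illustrative): on success the JSON carries
# {"status": 200, "tag": {"id": <int>, "name": <str>}, ...}; on failure "status" stays 500
# and "msg" explains why.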
@api.route("/add_branch/", methods=['POST'])
@login_required
def add_branch():
response = {"status": 500, "msg": "name is Null!"}
name = request.form['name']
parent_id = request.form['parent']
if name != "":
branch = Branch.add(name, parent_id=parent_id)
if branch:
res = {"id": branch.id, "name": branch.name}
response['branch'] = res
response["status"] = 200
else:
response["msg"] = "branch has already exists!"
return make_response(json.dumps(response))
@api.route("/search/<string:keyword>")
def search(keyword):
res = search_helper.search(keyword)
data = {}
data["result"] = res
return jsonify(data)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@api.route("/upload_image/",methods=['POST'])
def upload_image():
result = {
"success" : 0,
"message" : "",
"url" : ""
}
if request.method == "POST":
print(request.files)
if 'editormd-image-file' not in request.files:
result["message"] = "No file part"
return jsonify(result)
file = request.files['editormd-image-file']
if file.filename == '':
result["message"] = "No selected file"
return jsonify(result)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
save_path = os.path.join(base_dir,UPLOAD_FOLDER,filename)
file.save(save_path)
result["success"] = 1
result["message"] = "Success"
result["url"] = "/"+ UPLOAD_FOLDER + filename
return jsonify(result)
| 2.3125 | 2 |
updi/link.py | leonerd/pyupdi | 197 | 12795814 | <reponame>leonerd/pyupdi
"""
Link layer in UPDI protocol stack
"""
import logging
import time
from updi.physical import UpdiPhysical
import updi.constants as constants
class UpdiDatalink(object):
"""
UPDI data link class handles the UPDI data protocol within the device
"""
def __init__(self, comport, baud):
self.logger = logging.getLogger("link")
# Create a UPDI physical connection
self.use24bit=False
self.updi_phy = UpdiPhysical(comport, baud)
# Initialise
self.init()
# Check
if not self.check():
# Send double break if all is not well, and re-check
self.updi_phy.send_double_break()
self.init()
if not self.check():
raise Exception("UPDI initialisation failed")
def set_24bit_updi(self, mode):
self.logger.info("Using 24-bit updi")
self.use24bit = mode
def init(self):
"""
Set the inter-byte delay bit and disable collision detection
"""
self.stcs(constants.UPDI_CS_CTRLB, 1 << constants.UPDI_CTRLB_CCDETDIS_BIT)
self.stcs(constants.UPDI_CS_CTRLA, 1 << constants.UPDI_CTRLA_IBDLY_BIT)
def check(self):
"""
Check UPDI by loading CS STATUSA
"""
if self.ldcs(constants.UPDI_CS_STATUSA) != 0:
self.logger.info("UPDI init OK")
return True
self.logger.info("UPDI not OK - reinitialisation required")
return False
def ldcs(self, address):
"""
Load data from Control/Status space
"""
self.logger.info("LDCS from 0x{0:02X}".format(address))
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LDCS | (address & 0x0F)])
response = self.updi_phy.receive(1)
if len(response) != 1:
# Todo - flag error
return 0x00
return response[0]
def stcs(self, address, value):
"""
Store a value to Control/Status space
"""
self.logger.info("STCS 0x{0:02X} to 0x{1:02X}".format(value, address))
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_STCS | (address & 0x0F), value])
def ld(self, address):
"""
Load a single byte direct from a 16/24-bit address
"""
self.logger.info("LD from 0x{0:06X}".format(address))
if self.use24bit:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
else:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8,
address & 0xFF, (address >> 8) & 0xFF])
return self.updi_phy.receive(1)[0]
def ld16(self, address):
"""
Load a 16-bit word directly from a 16/24-bit address
"""
self.logger.info("LD from 0x{0:06X}".format(address))
if self.use24bit:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
else:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16,
address & 0xFF, (address >> 8) & 0xFF])
return self.updi_phy.receive(2)
def st(self, address, value):
"""
Store a single byte value directly to a 16/24-bit address
"""
self.logger.info("ST to 0x{0:06X}".format(address))
if self.use24bit:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
else:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8,
address & 0xFF, (address >> 8) & 0xFF])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise Exception("Error with st")
self.updi_phy.send([value & 0xFF])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise Exception("Error with st")
def st16(self, address, value):
"""
Store a 16-bit word value directly to a 16/24-bit address
"""
self.logger.info("ST to 0x{0:06X}".format(address))
if self.use24bit:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
else:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16,
address & 0xFF, (address >> 8) & 0xFF])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise Exception("Error with st")
self.updi_phy.send([value & 0xFF, (value >> 8) & 0xFF])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise Exception("Error with st")
def ld_ptr_inc(self, size):
"""
Loads a number of bytes from the pointer location with pointer post-increment
"""
self.logger.info("LD8 from ptr++")
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC |
constants.UPDI_DATA_8])
return self.updi_phy.receive(size)
def ld_ptr_inc16(self, words):
"""
        Load a number of 16-bit words from the pointer location with pointer post-increment
"""
self.logger.info("LD16 from ptr++")
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC |
constants.UPDI_DATA_16])
return self.updi_phy.receive(words << 1)
def st_ptr(self, address):
"""
Set the pointer location
"""
self.logger.info("ST to ptr")
if self.use24bit:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_24,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
else:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_16,
address & 0xFF, (address >> 8) & 0xFF])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise Exception("Error with st_ptr")
def st_ptr_inc(self, data):
"""
Store data to the pointer location with pointer post-increment
"""
self.logger.info("ST8 to *ptr++")
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_8,
data[0]])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise Exception("ACK error with st_ptr_inc")
n = 1
while n < len(data):
self.updi_phy.send([data[n]])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise Exception("Error with st_ptr_inc")
n += 1
def st_ptr_inc16(self, data):
"""
Store a 16-bit word value to the pointer location with pointer post-increment
Disable acks when we do this, to reduce latency.
"""
self.logger.info("ST16 to *ptr++")
ctrla_ackon = 1 << constants.UPDI_CTRLA_IBDLY_BIT # with acks enabled.
ctrla_ackoff = ctrla_ackon | (1 << constants.UPDI_CTRLA_RSD_BIT) # acks off. (RSD)
# (Response signature disable)
self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackoff)
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC |
constants.UPDI_DATA_16] )
self.updi_phy.send(data) # No response expected.
# Re-enable acks
self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackon)
def repeat(self, repeats):
"""
Store a value to the repeat counter
"""
if (repeats - 1) > constants.UPDI_MAX_REPEAT_SIZE:
raise Exception("Invalid repeat count!")
self.logger.info("Repeat {0:d}".format(repeats))
repeats -= 1
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT | constants.UPDI_REPEAT_BYTE,
repeats & 0xFF])
def read_sib(self):
"""
Read the SIB
"""
return self.updi_phy.sib()
def key(self, size, key):
"""
Write a key
"""
self.logger.info("Writing key")
if len(key) != 8 << size:
raise Exception("Invalid KEY length!")
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_KEY | constants.UPDI_KEY_KEY | size])
self.updi_phy.send(list(reversed(list(key))))
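# Minimal usage sketch (port name and register choice are illustrative):
#   link = UpdiDatalink("/dev/ttyUSB0", 115200)    # opens the port and initialises UPDI
#   print(link.read_sib())                         # System Information Block
#   status = link.ldcs(constants.UPDI_CS_STATUSA)  # read a Control/Status register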
| 2.71875 | 3 |
lib/launch_scripts/ARCHER/ptf_worker.py | melver/logan | 0 | 12795815 | <filename>lib/launch_scripts/ARCHER/ptf_worker.py
#!/usr/bin/env python2
# Needs to run with ARCHER's Python version.
"""
Task-farm worker script.
"""
import sys
import json
import subprocess
def main(argv):
json_path = argv[1]
parallel_workers = int(argv[2])
mpi_rank = int(argv[3]) - 1
if mpi_rank < 0 or mpi_rank >= parallel_workers:
raise Exception("Unexpected rank! {}".format(mpi_rank))
with open(json_path, 'r') as json_file:
processes = json.load(json_file)
for i in range(mpi_rank, len(processes), parallel_workers):
cmd = processes[i]['cmd']
tid = processes[i]['tid']
stdout_path = processes[i]['stdout']
stderr_path = processes[i]['stderr']
stdout_handle = open(stdout_path, 'wb')
stderr_handle = open(stderr_path, 'wb')
proc = subprocess.Popen(cmd, stdout=stdout_handle, stderr=stderr_handle)
if proc.wait() != 0:
print("WARNING: Process with tid {} exit with code {}!".format(
tid, proc.poll()))
stdout_handle.close()
stderr_handle.close()
# Does ptf run it so that __name__ == "__main__" ??
sys.exit(main(sys.argv))
| 2.421875 | 2 |
06_Alistirma/10_Cozunurluk_Ayarlama(SetTheResolution).py | REISOGLU53/OpenCV-Python | 2 | 12795816 | <reponame>REISOGLU53/OpenCV-Python
import cv2
cv2.namedWindow("LiveCam")
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
print("weigth:"+str(cap.get(3)))
print("height:"+str(cap.get(4)))
cap.set(3, 800)
cap.set(4, 720)
print("weigth*:"+str(cap.get(3)))
print("height*:"+str(cap.get(4)))
while True:
ret, frame = cap.read()
if ret == 0: break
frame = cv2.flip(frame, 1)
cv2.imshow("LiveCam", frame)
if cv2.waitKey(1) & 0xFF==ord("q"):break
cap.release()
cv2.destroyAllWindows()
| 3.0625 | 3 |
demo.py | vietbt/EVRPpp | 5 | 12795817 | from env.evrp import EVRPEnv
from utils.config import read_config
import numpy as np
from env.worker import VectorizedEVRP
from utils.plot import convert_plt_to_numpy, convert_plt_to_tf, plot_tours
from utils import try_import_tensorflow
tf = try_import_tensorflow()
if __name__ == "__main__":
config = read_config("config.json")
env = EVRPEnv(config.env_config)
tours = [[1, 69, 85, 129, 119, 55, 13, 90, 66, 46, 62, 65, 45, 12, 101, 75, 82, 63, 97, 146, 19, 91, 26, 128], [1, 37, 17, 33, 8, 20, 86, 29, 134, 146, 48, 126, 34, 105, 133, 24, 22, 124, 84, 57, 92, 36, 98], [1, 116, 125, 49, 59, 47, 31, 122, 145, 88, 99, 32, 7, 80, 61, 112, 2, 108, 6, 42, 94, 117, 137], [1, 79, 87, 9, 73, 103, 54, 111, 115, 44, 3, 18, 127], [1, 121, 143, 56, 138, 83, 15, 38, 123, 78, 23, 120, 81, 141, 147, 16, 28, 107, 25, 67, 100, 74, 89, 72, 10, 131], [1, 14, 110, 144, 51, 52, 39, 104, 64, 113, 27, 76, 114, 11, 135, 93, 109, 118, 102, 21, 53, 41, 71, 5], [1, 50, 58, 96, 140, 147, 142, 132, 70, 40, 30, 43, 95, 4, 77, 130, 106, 139, 35, 68, 136, 60]]
plt = plot_tours(env, tours, 123)
plt.show()
| 1.859375 | 2 |
e9/9.py | neutronest/eulerproject-douby | 4 | 12795818 | for i in xrange(1000):
for j in xrange(1000 - i):
k = 1000 - i - j
if i and j and k and i * i + j * j == k * k:
print i, j, k
print i * j * k
| 2.609375 | 3 |
lib/googlecloudsdk/command_lib/resource_manager/tag_arguments.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2 | 12795819 | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for defining CRM Tag arguments on a parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
def AddShortNameArgToParser(parser):
"""Adds positional argument to parser.
Args:
parser: ArgumentInterceptor, an argparse parser.
"""
parser.add_argument(
"short_name",
metavar="SHORT_NAME",
help=("User specified, friendly name of the TagKey or TagValue. The field"
" must be 1-63 characters, beginning and ending with an "
"alphanumeric character ([a-z0-9A-Z]) with dashes (-), "
"underscores ( _ ), dots (.), and alphanumerics between. "))
def AddParentArgToParser(parser, required=True, message=""):
"""Adds argument for the TagKey or TagValue's parent to the parser.
Args:
parser: ArgumentInterceptor, An argparse parser.
required: Boolean, to enforce --parent as a required flag.
message: String, replacement help text for flag.
"""
parser.add_argument(
"--parent",
metavar="PARENT",
required=required,
help=message if message else ("Parent of the resource."))
def AddDescriptionArgToParser(parser):
"""Adds argument for the TagKey's or TagValue's description to the parser.
Args:
parser: ArgumentInterceptor, An argparse parser.
"""
parser.add_argument(
"--description",
metavar="DESCRIPTION",
help=("User-assigned description of the TagKey or TagValue. "
"Must not exceed 256 characters."))
def AddPurposeArgToParser(parser):
"""Adds argument for the TagKey's purpose to the parser.
Args:
parser: ArgumentInterceptor, An argparse parser.
"""
parser.add_argument(
"--purpose",
metavar="PURPOSE",
choices=["GCE_FIREWALL"],
help=("Purpose specifier of the TagKey that can only be set on creation. "
"Specifying this field adds additional validation from the policy "
"system that corresponds to the purpose."))
def AddPurposeDataArgToParser(parser):
"""Adds argument for the TagKey's purpose data to the parser.
Args:
parser: ArgumentInterceptor, An argparse parser.
"""
parser.add_argument(
"--purpose-data",
type=arg_parsers.ArgDict(
spec={"network": str},
max_length=1,
),
help=("Purpose data of the TagKey that can only be set on creation. "
"This data is validated by the policy system that corresponds"
" to the purpose."))
def AddAsyncArgToParser(parser):
"""Adds async flag to the parser.
Args:
parser: ArgumentInterceptor, An argparse parser.
"""
base.ASYNC_FLAG.AddToParser(parser)
def AddResourceNameArgToParser(parser):
"""Adds resource name argument for the namespaced name or resource name to the parser.
Args:
parser: ArgumentInterceptor, An argparse parser.
"""
parser.add_argument(
"RESOURCE_NAME",
metavar="RESOURCE_NAME",
help=("Resource name or namespaced name. The resource name should "
"be in the form {resource_type}/{numeric_id}. The namespaced name "
"should be in the form {org_id}/{short_name} where short_name "
"must be 1-63 characters, beginning and ending with an "
"alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores "
"( _ ), dots (.), and alphanumerics between."))
def AddForceArgToParser(parser):
"""Adds force argument to the parser.
Args:
parser: ArgumentInterceptor, An argparse parser.
"""
parser.add_argument(
"--force", action="store_true", help=("Force argument to bypass checks."))
def AddPolicyFileArgToParser(parser):
"""Adds argument for the local Policy file to set.
Args:
parser: ArgumentInterceptor, An argparse parser.
"""
parser.add_argument(
"POLICY_FILE",
metavar="POLICY_FILE",
help=(
"Path to a local JSON or YAML formatted file containing a valid "
"policy. The output of the `get-iam-policy` command is a valid "
"file, as is any JSON or YAML file conforming to the structure of "
"a [Policy](https://cloud.google.com/iam/reference/rest/v1/Policy)."))
def AddTagValueArgToParser(parser):
"""Adds the TagValue argument to the parser.
Args:
parser: ArgumentInterceptor, An argparse parser.
"""
parser.add_argument(
"--tag-value",
metavar="TAG_VALUE",
required=True,
help=("Tag value name or namespaced name. The name should "
"be in the form tagValues/{numeric_id}. The namespaced name "
"should be in the form {org_id}/{tag_key_short_name}/{short_name} "
"where short_name must be 1-63 characters, beginning and ending "
"with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), "
"underscores (_), dots (.), and alphanumerics between."))
def AddLocationArgToParser(parser, message):
"""Adds argument for the location.
Args:
parser: ArgumentInterceptor, An argparse parser.
message: String, help text for flag.
"""
parser.add_argument(
"--location", metavar="LOCATION", required=False, help=message)
| 1.882813 | 2 |
awsm/hooks/validators.py | jeevb/awsm | 0 | 12795820 | <reponame>jeevb/awsm
from awsm.validators import yaml_dict
from voluptuous import All, Coerce, Schema
HOOK_SCHEMA = Schema(All(Coerce(yaml_dict), {
'includes': [str],
'tasks': [dict]
}))
HOOK_VARS_SCHEMA = All(Coerce(yaml_dict), dict)
HOOKS_CFG_SCHEMA = Schema({
'vars': HOOK_VARS_SCHEMA,
str: HOOK_SCHEMA
})
| 2.03125 | 2 |
wordservice/src/app.py | mjm461/step-funtions-example | 0 | 12795821 | from flask import jsonify
from pyawsstarter import Logger
from wordservice import create_app
# Call the Application Factory function to construct a Flask application instance
# using the standard configuration defined in /instance/flask.cfg
application = create_app('flask.cfg')
@application.errorhandler(Exception)
def handle_invalid_usage(error):
response = jsonify(
{
'error': str(error)
}
)
response.status_code = 401 # Don't do it this way, just for an example
return response
if __name__ == '__main__':
Logger.get_logger('wordservice').info('Starting wordservice')
application.run(host='0.0.0.0', port=8080)
| 2.828125 | 3 |
dbmanage/passforget/urls.py | bopopescu/sbdb_new | 3 | 12795822 | from django.conf.urls import url
from django.contrib import admin
import views
admin.autodiscover()
urlpatterns = [
url(r'^pass_forget/$', views.pass_forget, name='pass_forget'),
# url(r'^pass_rec/$', views.pass_rec, name='pass_rec'),
]
| 1.507813 | 2 |
bestbuyapi/api/stores.py | lv10/bestbuyapi | 10 | 12795823 | from ..api.base import BestBuyCore
from ..constants import STORES_API
from ..utils.exceptions import BestBuyStoresAPIError
class BestBuyStoresAPI(BestBuyCore):
def _api_name(self):
return STORES_API
# =================================
# Search by store by name or id
# =================================
def search_by_id(self, store_id, **kwargs):
"""Searches the stores api given an id"""
payload = {"query": f"storeId={store_id}", "params": kwargs}
return self._call(payload)
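    # Illustrative usage (assumes BestBuyCore is constructed with an API key; the id and
    # extra parameters are examples only):
    #   api = BestBuyStoresAPI("<api_key>")
    #   api.search_by_id(281, show="storeId,city,region")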
| 2.546875 | 3 |
DP/Stock_Buy_Sell_1.py | anilpai/leetcode | 0 | 12795824 | '''
Say you have an array for which the ith element is the price of a given stock on day i.
If you were only permitted to complete at most one transaction
(ie, buy one and sell one share of the stock), design an algorithm to find the maximum profit.
Input: [7, 1, 5, 3, 6, 4]
Output: 5
Input: [7, 6, 4, 3, 1]
Output: 0
'''
class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
max_profit, min_price = 0, float('inf')
for price in prices:
min_price = min(min_price, price)
profit = price - min_price
max_profit = max(max_profit, profit)
return max_profit
def maxProfit2(self, prices):
'''
Kadane's algorithm.
'''
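        # Sketch of the idea: treat prices[i] - prices[i-1] as a gain/loss array;
        # maxCur is the best subarray sum ending at day i (clamped at 0, i.e. "buy today"),
        # and maxSoFar tracks the best profit seen so far.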
maxCur = 0
maxSoFar = 0
for i in range(1, len(prices)):
maxCur = max(0, maxCur + prices[i] - prices[i-1])
maxSoFar = max(maxCur, maxSoFar)
return maxSoFar
if __name__=='__main__':
s = Solution()
transaction1 = [7, 1, 5, 3, 6, 4]
transaction2 = [7, 6, 4, 3, 1]
print(s.maxProfit(transaction1))
print(s.maxProfit(transaction2))
print(s.maxProfit2(transaction1))
print(s.maxProfit2(transaction2))
t3 = [1, 7, 4, 11]
t4 = [0, 6, -3, 7]
print (s.maxProfit2(t3))
print (s.maxProfit2(t4))
| 3.84375 | 4 |
src/enums.py | JQGoh/multivariate_time_series_pipeline | 1 | 12795825 | <reponame>JQGoh/multivariate_time_series_pipeline
# -*- coding: utf-8 -*-
from pathlib import Path
class FilePathEnum(object):
PROJECT_DIR_POSIX = project_dir = Path(__file__).resolve().parents[1] # PosixPath
DOWNLOADED_ZIP = Path(PROJECT_DIR_POSIX).joinpath(
"data/raw/household_power_consumption.zip"
)
FIGURE = Path(PROJECT_DIR_POSIX).joinpath("reports/figures/predictions_global_active_power.png")
MOCK_DATA = Path(PROJECT_DIR_POSIX).joinpath("data/interim/mock_data.csv")
PIPELINE = Path(PROJECT_DIR_POSIX).joinpath("data/processed/data_processing_pipelines.pkl")
TEST_DATA = Path(PROJECT_DIR_POSIX).joinpath("data/processed/test_data.csv")
TRAIN_FEATURES = Path(PROJECT_DIR_POSIX).joinpath("data/processed/train_features.csv")
class TsFreshEnum(object):
# id passed as column_id/the derived feature due to rolling of time series
ID = "id"
SORT = "sort"
class DataMetadata(object):
"""Data metadata"""
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
DATETIME = "datetime"
TARGET = "Global_active_power"
# synthetic series id
ID = TsFreshEnum.ID
GLOBAL_REACTIVE_POWER = "Global_reactive_power"
GLOBAL_INTENSITY = "Global_intensity"
SUB_METERING_1 = "Sub_metering_1"
SUB_METERING_2 = "Sub_metering_2"
SUB_METERING_3 = "Sub_metering_3"
VOLTAGE = "Voltage"
# Column sets
NUMERIC_FEATURES = [
GLOBAL_REACTIVE_POWER,
GLOBAL_INTENSITY,
SUB_METERING_1,
SUB_METERING_2,
SUB_METERING_3,
VOLTAGE,
]
| 2.109375 | 2 |
3d_wykresy.py | szarejkodariusz/3DRandomWalksInPython | 0 | 12795826 | <reponame>szarejkodariusz/3DRandomWalksInPython
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import random
import numpy.random as npr
import numpy as np
# Number of steps
n_steps = 100
# Number of random walks
n_walks = 10
walks_x = [[0] * n_steps for i in range(n_walks)]
walks_y = [[0] * n_steps for i in range(n_walks)]
walks_z = [[0] * n_steps for i in range(n_walks)]
walk_x = [0] * n_steps
walk_y = [0] * n_steps
walk_z = [0] * n_steps
x2ave = [0.0] * n_steps
y2ave = [0.0] * n_steps
z2ave = [0.0] * n_steps
r2ave = [0.0] * n_steps
# Generate random walk
for i in range(0, n_walks):
x = 0
y = 0
z = 0
for j in range(0, n_steps):
# Array of random number
rnd = npr.random(3)-0.5
# Norm array
norm = np.linalg.norm(rnd)
rnd = rnd / norm
x = rnd[0] + x
y = rnd[1] + y
z = rnd[2] + z
# <x> = 0 so variance can
# be calculated in the following way:
x2ave[j] = x2ave[j] + x**2;
y2ave[j] = y2ave[j] + y**2;
z2ave[j] = z2ave[j] + z**2;
walk_x[j] = x
walk_y[j] = y
walk_z[j] = z
walks_x[i] = [x for x in walk_x]
walks_y[i] = [y for y in walk_y]
walks_z[i] = [z for z in walk_z]
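# Note: x2ave/y2ave/z2ave accumulate sums of squared displacements over all walks;
# dividing each entry by n_walks would give the mean-square displacement per step
# (that normalisation is not plotted below).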
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d')
for i in range(0,n_walks):
ax.plot(walks_x[i], walks_y[i], walks_z[i], label='Random walk')
    ax.scatter(walks_x[i][-1], walks_y[i][-1], walks_z[i][-1], c='b', marker='o') # Plotting final point
# Plot
plt.xlabel('x')
plt.ylabel('y')
#plt.zlabel('z')
ax.legend()
plt.title('Random walks in 3D')
plt.grid(True)
plt.show()
| 3.390625 | 3 |
scripts/setup-sfe-benchmarks.py | jthorton/double-exp-vdw | 1 | 12795827 | <reponame>jthorton/double-exp-vdw
import os.path
from glob import glob
from absolv.models import (
EquilibriumProtocol,
SimulationProtocol,
State,
System,
TransferFreeEnergySchema,
)
from nonbonded.library.models.datasets import DataSet
from openff.toolkit.typing.engines.smirnoff import ForceField
from openmm import unit
from tqdm import tqdm
def main():
data_sets = [
DataSet.parse_file(
os.path.join(
"..",
"data-set-curation",
"physical-property",
"benchmarks",
"sage-fsolv-test-v1.json",
)
),
DataSet.parse_file(
os.path.join(
"..",
"data-set-curation",
"physical-property",
"benchmarks",
"sage-mnsol-test-v1.json",
)
),
]
schemas = []
for entry in (entry for data_set in data_sets for entry in data_set.entries):
solute = [
component.smiles
for component in entry.components
if component.role == "Solute"
][0]
solvent = [
component.smiles
for component in entry.components
if component.role == "Solvent"
][0]
schema = TransferFreeEnergySchema(
system=System(
solutes={solute: 1}, solvent_a=None, solvent_b={solvent: 1000}
),
state=State(
temperature=298.15 * unit.kelvin, pressure=1.0 * unit.atmosphere
),
alchemical_protocol_a=EquilibriumProtocol(
lambda_sterics=[1.0, 1.0, 1.0, 1.0, 1.0],
lambda_electrostatics=[1.0, 0.75, 0.5, 0.25, 0.0],
sampler="repex",
production_protocol=SimulationProtocol(
n_steps_per_iteration=500, n_iterations=2000
),
),
alchemical_protocol_b=EquilibriumProtocol(
lambda_sterics=[
1.00, 1.00, 1.00, 1.00, 1.00, 0.95, 0.90, 0.80, 0.70, 0.60, 0.50,
0.40, 0.35, 0.30, 0.25, 0.20, 0.15, 0.10, 0.05, 0.00,
],
lambda_electrostatics=[
1.00, 0.75, 0.50, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
],
sampler="repex",
production_protocol=SimulationProtocol(
n_steps_per_iteration=500, n_iterations=2000
),
),
)
schemas.append(schema)
force_field_paths = glob(
os.path.join(
"..",
"inputs-and-results",
"optimizations",
"*",
"result",
"optimize",
"force-field.offxml",
)
)
for force_field_path in tqdm(force_field_paths, desc="force field"):
root_name = force_field_path.split(os.sep)[-4]
root_path = os.path.join(
"..",
"inputs-and-results",
"benchmarks",
"transfer-free-energies",
root_name,
)
os.makedirs(os.path.join(root_path, "schemas"))
force_field = ForceField(
force_field_path, load_plugins=True, allow_cosmetic_attributes=True
)
force_field.to_file(
os.path.join(root_path, "force-field.offxml"),
discard_cosmetic_attributes=True,
)
for i, schema in enumerate(schemas):
with open(os.path.join(root_path, "schemas", f"{i + 1}.json"), "w") as file:
file.write(schema.json(indent=2))
if __name__ == "__main__":
main()
| 1.84375 | 2 |
app/simulator/stamp.py | wbj218/microVisualization | 0 | 12795828 | <filename>app/simulator/stamp.py<gh_stars>0
class Stamp:
def __init__(self):
self.enqueue_time = -1
self.picked_time = -1
self.processed_time = -1
self.send_time = -1
self.cache_send_time = -1
self.cache_get_back_time = -1
self.final_time = -1
self.delta_time = -1
self.values = {}
self.checked_cache = False
self.cache_result = None
self.sql_result = None
def visited(self):
return self.picked_time != -1 | 2.40625 | 2 |
tracker/lookups.py | TreZc0/donation-tracker | 39 | 12795829 | <filename>tracker/lookups.py
from ajax_select import LookupChannel
from django.contrib.auth import get_user_model
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.urls import reverse
from django.utils.html import escape
from django.utils.safestring import mark_safe
import tracker.search_filters as filters
from tracker.models import (
Bid,
Country,
CountryRegion,
Donation,
Donor,
Event,
Prize,
Runner,
SpeedRun,
)
"""
In order to use these lookups properly with the admin, you will need to install/enable the 'ajax_select'
django module, and also add an AJAX_LOOKUP_CHANNELS table (the table of all
lookups used by this application are in tracker/ajax_lookup_channels.py)
They can be imported with the line:
from tracker.ajax_lookup_channels import AJAX_LOOKUP_CHANNELS
"""
class UserLookup(LookupChannel):
def __init__(self, *args, **kwargs):
self.model = get_user_model()
super(UserLookup, self).__init__(*args, **kwargs)
def get_query(self, q, request):
if not request.user.has_perm('tracker.can_search'):
raise PermissionDenied
return self.model.objects.filter(username__icontains=q)
def get_result(self, obj):
return obj.username
def format_match(self, obj):
return escape(obj.username)
def can_add(self, user, source_model):
# avoid in-line addition of users by accident
return False
class CountryLookup(LookupChannel):
model = Country
def get_query(self, q, request):
return Country.objects.filter(name__icontains=q)
def get_result(self, obj):
return str(obj)
def format_match(self, obj):
return escape(str(obj))
def can_add(self, user, source_model):
# Presumably, we don't want to add countries typically
return False
class CountryRegionLookup(LookupChannel):
model = CountryRegion
def get_query(self, q, request):
return CountryRegion.objects.filter(
Q(name__icontains=q) | Q(country__name__icontains=q)
)
def get_result(self, obj):
return str(obj)
def format_match(self, obj):
return escape(str(obj))
class GenericLookup(LookupChannel):
useLock = False
extra_params = {}
def get_extra_params(self, request):
return self.extra_params
def get_query(self, q, request):
params = {'q': q}
params.update(self.get_extra_params(request))
model = getattr(self, 'modelName', self.model)
if self.useLock and not request.user.has_perm('tracker.can_edit_locked_events'):
params['locked'] = False
return filters.run_model_query(model, params, request.user)
def get_result(self, obj):
return str(obj)
def format_match(self, obj):
return escape(str(obj))
# returning the admin URL reduces the genericity of our solution a little bit, but this can be solved
# by using distinct lookups for admin/non-admin applications (which we should do regardless since
# non-admin search should be different)
def format_item_display(self, obj):
result = '<a href="{0}">{1}</a>'.format(
reverse(
'admin:tracker_{0}_change'.format(obj._meta.model_name), args=[obj.pk]
),
escape(str(obj)),
)
return mark_safe(result)
class BidLookup(GenericLookup):
useLock = True
model = Bid
modelName = 'bid'
extra_params = {'feed': 'all'}
class AllBidLookup(GenericLookup):
useLock = True
model = Bid
modelName = 'allbids'
extra_params = {'feed': 'all'}
class BidTargetLookup(GenericLookup):
model = Bid
modelName = 'bidtarget'
useLock = True
extra_params = {'feed': 'all'}
class DonationLookup(GenericLookup):
model = Donation
useLock = True
class DonorLookup(GenericLookup):
model = Donor
class PrizeLookup(GenericLookup):
model = Prize
class RunLookup(GenericLookup):
model = SpeedRun
useLock = True
class EventLookup(GenericLookup):
model = Event
useLock = True
class RunnerLookup(GenericLookup):
model = Runner
| 2.09375 | 2 |
tellsticknet/protocols/fineoffset.py | molobrakos/tellsticknet | 30 | 12795830 | <gh_stars>10-100
def decode(packet):
"""
https://github.com/telldus/telldus/blob/master/telldus-core/service/ProtocolFineoffset.cpp
>>> decode(dict(data=0x48801aff05))["data"]["temp"]
2.6
"""
data = packet["data"]
data = "%010x" % int(data)
data = data[:-2]
humidity = int(data[-2:], 16)
data = data[:-2]
value = int(data[-3:], 16)
temp = (value & 0x7FF) / 10
value >>= 11
if value & 1:
temp = -temp
data = data[:-3]
id = int(data, 16) & 0xFF
if humidity <= 100:
return dict(
packet,
model="temperaturehumidity",
sensorId=id,
data=dict(humidity=humidity, temp=temp),
)
else:
return dict(
packet, model="temperature", sensorId=id, data=dict(temp=temp)
)
| 3.15625 | 3 |
few_shot/datasets.py | liaoweiduo/few-shot | 0 | 12795831 | <gh_stars>0
from torch.utils.data import Dataset
import torch
from PIL import Image
from torchvision import transforms
from skimage import io
from tqdm import tqdm
import pandas as pd
import numpy as np
import os
from typing import List, Dict
from config import DATA_PATH
class OmniglotDataset(Dataset):
def __init__(self, subset, OOD_test=False):
"""Dataset class representing Omniglot dataset
# Arguments:
subset: Whether the dataset represents the 'background' or 'evaluation' set
"""
if subset not in ('background', 'evaluation'):
            raise ValueError('subset must be one of (background, evaluation)')
self.subset = subset
self.OOD_test = OOD_test
self.df = pd.DataFrame(self.index_subset(self.subset))
# Index of dataframe has direct correspondence to item in dataset
self.df = self.df.assign(id=self.df.index.values)
# Convert arbitrary class names of dataset to ordered 0-(num_speakers - 1) integers
self.unique_characters = sorted(self.df['class_name'].unique()) # [2636]
# ['Angelic.0.character01', 'Angelic.0.character02', 'Angelic.0.character03', ...]
self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())} # {dict: 2636}
# {'Angelic.0.character01': 0, 'Angelic.0.character02': 1, 'Angelic.0.character03': 2, ...}
self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c]))
# alphabet class_name filepath subset id class_id {DataFrame: (52720, 6)}
# 0 Angelic.0 Angelic.0.character01 ... 0 0
# 1 Angelic.0 Angelic.0.character01 ... 1 0
# 2 Angelic.0 Angelic.0.character01 ... 2 0
# 3 Angelic.0 Angelic.0.character01 ... 3 0
# 4 Angelic.0 Angelic.0.character01 ... 4 0
# Create dicts
self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 52720}
# {0: '//10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\Angelic.0\\character01\\0965_01.png', ...}
self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 52720}
# {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0, ...}
# Setup transforms enable evaluation as OOD dataset
self.transform = transforms.Compose([
transforms.Resize(84),
transforms.ToTensor(), # ToTensor() will normalize to [0, 1]
# transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
def __getitem__(self, item):
if self.OOD_test:
instance = Image.open(self.datasetid_to_filepath[item]) # PNG, 28X28
instance = instance.convert('RGB')
instance = self.transform(instance) # [3, 84, 84]
label = self.datasetid_to_class_id[item] # from 0 -> 20
return instance, label
else:
instance = io.imread(self.datasetid_to_filepath[item]) # [28, 28]
# Reindex to channels first format as supported by pytorch
instance = instance[np.newaxis, :, :] # [1, 28, 28]
# Normalise to 0-1
instance = (instance - instance.min()) / (instance.max() - instance.min())
label = self.datasetid_to_class_id[item] # from 0 -> 2636
return torch.from_numpy(instance), label
def __len__(self):
return len(self.df)
def num_classes(self):
return len(self.df['class_name'].unique())
@staticmethod
def index_subset(subset):
"""Index a subset by looping through all of its files and recording relevant information.
# Arguments
subset: Name of the subset
# Returns
A list of dicts containing information about all the image files in a particular subset of the
Omniglot dataset dataset
"""
images = []
print('Indexing {}...'.format(subset))
# Quick first pass to find total for tqdm bar
subset_len = 0
for root, folders, files in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)):
subset_len += len([f for f in files if f.endswith('.png')])
progress_bar = tqdm(total=subset_len)
for root, folders, files in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)):
if len(files) == 0:
continue
alphabet = root.split(os.sep)[-2] # linux / ; windows \\
# Angelic.0
class_name = '{}.{}'.format(alphabet, root.split(os.sep)[-1])
# Angelic.0.character01
for f in files:
progress_bar.update(1)
images.append({
'subset': subset,
'alphabet': alphabet,
'class_name': class_name,
'filepath': os.path.join(root, f)
})
# filepath: //10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\Angelic.0\\character01\\0965_01.png
progress_bar.close()
return images
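# Illustrative usage (assumes the Omniglot_enriched images are present under DATA_PATH):
#   background = OmniglotDataset('background')
#   image, class_id = background[0]   # image: [1, 28, 28] float tensor, class_id: int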
class MiniImageNet(Dataset):
def __init__(self, subset):
"""Dataset class representing miniImageNet dataset
# Arguments:
subset: Whether the dataset represents the background or evaluation set
"""
if subset not in ('background', 'evaluation'):
            raise ValueError('subset must be one of (background, evaluation)')
self.subset = subset
self.df = pd.DataFrame(self.index_subset(self.subset))
# Index of dataframe has direct correspondence to item in dataset
self.df = self.df.assign(id=self.df.index.values)
# Convert arbitrary class names of dataset to ordered 0-(num_speakers - 1) integers
self.unique_characters = sorted(self.df['class_name'].unique()) # [20]
# ['n01770081', 'n02101006', 'n02108551', 'n02174001', 'n02219486', 'n02606052', 'n02747177', ...]
self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())} # {dict: 20}
# {'n01770081': 0, 'n02101006': 1, 'n02108551': 2, 'n02174001': 3, 'n02219486': 4, ...}
self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c]))
# class_name filepath subset id class_id {MiniImageNet: 12000}
# 0 n01770081 ... 0 0
# 1 n01770081 ... 1 0
# 2 n01770081 ... 2 0
# 3 n01770081 ... 3 0
# 4 n01770081 ... 4 0
# Create dicts
self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 12000}
# {0: '//10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\n01770081\\00001098.jpg', ...}
self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 12000}
# {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, ...}
# Setup transforms
self.transform = transforms.Compose([
transforms.CenterCrop(224),
transforms.Resize(84),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
def __getitem__(self, item):
instance = Image.open(self.datasetid_to_filepath[item]) # JpegImageFile, 500x384
instance = self.transform(instance) # [3, 84, 84]
label = self.datasetid_to_class_id[item] # from 0 -> 20
return instance, label
def __len__(self):
return len(self.df)
def num_classes(self):
return len(self.df['class_name'].unique())
@staticmethod
def index_subset(subset):
"""Index a subset by looping through all of its files and recording relevant information.
# Arguments
subset: Name of the subset
# Returns
A list of dicts containing information about all the image files in a particular subset of the
miniImageNet dataset
"""
images = []
print('Indexing {}...'.format(subset))
# Quick first pass to find total for tqdm bar
subset_len = 0
for root, folders, files in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)):
subset_len += len([f for f in files if f.endswith('.jpg')])
progress_bar = tqdm(total=subset_len)
for root, folders, files in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)):
if len(files) == 0:
continue
class_name = root.split(os.sep)[-1] # linux / ; windows \\
# n01770081
for f in files:
progress_bar.update(1)
images.append({
'subset': subset,
'class_name': class_name,
'filepath': os.path.join(root, f)
})
# filepath: //10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\n01770081\\00001098.jpg
progress_bar.close()
return images
class Meta(Dataset):
def __init__(self, subset, target):
"""Dataset class representing CUB_Bird/DTD_Texture/FGVC_Aircraft/FGVCx_Fungi dataset
# Arguments:
subset: Whether the dataset represents the background or evaluation set
target: which dataset to represent
"""
if subset not in ('background', 'evaluation'):
            raise ValueError('subset must be one of (background, evaluation)')
if target not in ('CUB_Bird', 'DTD_Texture', 'FGVC_Aircraft', 'FGVCx_Fungi'):
            raise ValueError('target must be one of (CUB_Bird, DTD_Texture, FGVC_Aircraft, FGVCx_Fungi)')
self.subset = subset
self.target = target
self.df = pd.DataFrame(self.index_subset(self.subset, self.target))
# Index of dataframe has direct correspondence to item in dataset
self.df = self.df.assign(id=self.df.index.values)
        # Convert arbitrary class names of dataset to ordered 0-(num_classes - 1) integers
self.unique_characters = sorted(self.df['class_name'].unique()) # [16]
# ['014.Indigo_Bunting', '042.Vermilion_Flycatcher', '051.Horned_Grebe', ...]
self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())} # {dict: 16}
# {'014.Indigo_Bunting': 0, '042.Vermilion_Flycatcher': 1, '051.Horned_Grebe': 2, ...}
self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c]))
# class_name filepath subset id class_id {Bird: 960}
# 0 014.Indigo_Bunting ... 0 0
# 1 014.Indigo_Bunting ... 1 0
# 2 014.Indigo_Bunting ... 2 0
# 3 014.Indigo_Bunting ... 3 0
# 4 014.Indigo_Bunting ... 4 0
# Create dicts
self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 960}
# {0: '//10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\014.Indigo_Bunting\\Indigo_Bunting_0001_12469.jpg', ...}
self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 960}
# {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, ...}
# Setup transforms
self.transform = transforms.Compose([
transforms.Resize(84),
transforms.ToTensor(), # ToTensor() will normalize to [0, 1]
])
def __getitem__(self, item):
instance = Image.open(self.datasetid_to_filepath[item]) # JpegImageFile, 84x84
instance = self.transform(instance) # [3, 84, 84]
label = self.datasetid_to_class_id[item] # from 0 -> 16
return instance, label
def __len__(self):
return len(self.df)
def num_classes(self):
return len(self.df['class_name'].unique())
@staticmethod
def index_subset(subset, target):
"""Index a subset by looping through all of its files and recording relevant information.
# Arguments
            subset: Name of the subset
            target: Name of the target dataset (one of CUB_Bird, DTD_Texture, FGVC_Aircraft, FGVCx_Fungi)
        # Returns
            A list of dicts containing information about all the image files in a particular subset of the
            chosen target dataset
"""
images = []
print('Indexing {}...{}...'.format(target, subset))
folder_name = 'train' if subset == 'background' else 'val'
# Quick first pass to find total for tqdm bar
subset_len = 0
for root, folders, files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target, folder_name)):
subset_len += len([f for f in files if f.endswith('.jpg')])
progress_bar = tqdm(total=subset_len)
for root, folders, files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target, folder_name)):
if len(files) == 0:
continue
class_name = root.split(os.sep)[-1] # linux / ; windows \\
# 014.Indigo_Bunting
for f in files:
progress_bar.update(1)
images.append({
'subset': subset,
'class_name': class_name,
'filepath': os.path.join(root, f)
})
# filepath: //10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\014.Indigo_Bunting\\Indigo_Bunting_0001_12469.jpg
progress_bar.close()
return images
class DummyDataset(Dataset):
def __init__(self, samples_per_class=10, n_classes=10, n_features=1):
"""Dummy dataset for debugging/testing purposes
A sample from the DummyDataset has (n_features + 1) features. The first feature is the index of the sample
in the data and the remaining features are the class index.
# Arguments
samples_per_class: Number of samples per class in the dataset
n_classes: Number of distinct classes in the dataset
n_features: Number of extra features each sample should have.
"""
self.samples_per_class = samples_per_class
self.n_classes = n_classes
self.n_features = n_features
# Create a dataframe to be consistent with other Datasets
self.df = pd.DataFrame({
'class_id': [i % self.n_classes for i in range(len(self))]
})
self.df = self.df.assign(id=self.df.index.values)
def __len__(self):
return self.samples_per_class * self.n_classes
def __getitem__(self, item):
class_id = item % self.n_classes
        return np.array([item] + [class_id]*self.n_features, dtype=float), float(class_id)
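    # Hedged usage sketch (not part of the original file): with n_classes=3 and
    # n_features=1, item 4 belongs to class 4 % 3 == 1, so
    # DummyDataset(samples_per_class=2, n_classes=3)[4] returns (array([4., 1.]), 1.0).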
class MultiDataset(Dataset):
def __init__(self, dataset_list: List[Dataset]):
"""Dataset class representing a list of datasets
# Arguments:
:param dataset_list: need to first prepare each sub-dataset
"""
self.dataset_list = dataset_list
self.datasetid_to_class_id = self.label_mapping()
def index_mapping(self, index) -> (int, int):
"""
A mapping method to map index (in __getitem__ method) to the index in the corresponding dataset.
:param index:
:return: dataset_id, item
"""
for dataset_id, dataset in enumerate(self.dataset_list):
if index < len(dataset):
return dataset_id, index
else:
index = index - len(dataset)
        raise ValueError(f'index exceeds total number of instances, index {index}')
def label_mapping(self) -> Dict:
"""
generate mapping dict from datasetid to global class id.
:return: datasetid_to_class_id
"""
datasetid_to_class_id = dict()
index_offset = 0
class_id_offset = 0
for dataset in self.dataset_list:
datasetid_to_class_id.update(
dict(zip(map(lambda id: id + index_offset, dataset.datasetid_to_class_id.keys()),
map(lambda class_id: class_id + class_id_offset, dataset.datasetid_to_class_id.values())))
)
index_offset = index_offset + len(dataset)
class_id_offset = class_id_offset + dataset.num_classes()
return datasetid_to_class_id
def __getitem__(self, item):
dataset_id, index = self.index_mapping(item)
instance, true_label = self.dataset_list[dataset_id][index] # true_label is the label in sub-dataset
label = self.datasetid_to_class_id[item] # int
return instance, label
def __len__(self):
return sum([len(dataset) for dataset in self.dataset_list])
def num_classes(self):
        return sum([dataset.num_classes() for dataset in self.dataset_list])
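    # Hedged usage sketch (the sizes below are made up, not from the original code):
    # for dataset_list = [A, B] with len(A) == 100 and A.num_classes() == 10,
    # index_mapping(105) returns (1, 5) and label_mapping() maps class 0 of B
    # to the global class id 10.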
if __name__ == "__main__":
# debug on MultiDataset
evaluation = MultiDataset([Meta('evaluation', 'CUB_Bird'),
Meta('evaluation', 'DTD_Texture'),
Meta('evaluation', 'FGVC_Aircraft')])
#
print(evaluation[1000][0].shape, evaluation[1000][1])
| 2.484375 | 2 |
ebay.py | zweed4u/Analytics | 0 | 12795832 | <reponame>zweed4u/Analytics
import requests, BeautifulSoup
session=requests.session()
headers={'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.90 Safari/537.36'}
styleCode='B39254'#'BB1826'
r=session.get('http://www.ebay.com/sch/i.html?_nkw='+str(styleCode)+'&LH_Complete=1&LH_Sold=1',headers=headers)
soup=BeautifulSoup.BeautifulSoup(r.content)
try:
#more than 48 listings - grid view
for cardListing in soup.find("ul", {"id": "GalleryViewInner"}).findAll('li'):
#parse element for title and sold price and when
#store all prices in array for maths - min, max, mean, volatility
print cardListing.find('h3').text
st=cardListing.findAll("div", {"class":"gvprices"})[0].text.split('$')[1]
price=st[:st.index('.')+3]
type=st[st.index('.')+3:]
print price, type
print
except:
#error with findAll - less than 48 items?
for cardListing in soup.find("ul", {"id":"ListViewInner"}).findAll('li',recursive=False):
#parse element for title and sold price and when
#store all prices in array for maths - min, max, mean, volatility
print cardListing.find('h3').text
st=cardListing.findAll("li", {"class":"lvprice prc"})[0].text.split('$')[1]
price=st[:st.index('.')+3]
type=cardListing.findAll("li", {"class":"lvformat"})[0].text
print price, type
print
| 3.015625 | 3 |
job_applications/models.py | yhaojin/recruitment_pipeline | 0 | 12795833 | from django.db import models
from .utils import upload_to_file, generate_random_string
from .validators import validate_file
class CompanyManager(models.Manager):
def get_or_none(self, **kwargs):
try:
return self.get(**kwargs)
except Company.DoesNotExist:
return None
class Company(models.Model):
objects = CompanyManager()
label = models.CharField(max_length=60, default="default", unique=True)
description = models.TextField(blank=True, null=True)
metadata = models.TextField()
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __repr__(self):
return self.__str__()
def __str__(self):
return f"<Company Label: {self.label} / Description: {self.description}>"
class JobManager(models.Manager):
def get_or_none(self, **kwargs):
try:
return self.get(**kwargs)
except Job.DoesNotExist:
return None
class Job(models.Model):
objects = JobManager()
label = models.CharField(max_length=60, default="default")
description = models.TextField(blank=True, null=True)
company = models.ForeignKey('job_applications.Company', on_delete=models.SET_NULL, null=True, blank=True)
hiring_contact = models.CharField(max_length=60, default="default")
hiring_contact_email = models.EmailField(max_length=254)
is_open = models.BooleanField(default=True)
metadata = models.TextField()
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __repr__(self):
return self.__str__()
def __str__(self):
return f"<Job Label: {self.label} / Company: {self.company.label}>"
class ApplicationManager(models.Manager):
def get_or_none(self, **kwargs):
try:
return self.get(**kwargs)
except Application.DoesNotExist:
return None
class Application(models.Model):
objects = ApplicationManager()
PENDING = 1
REVIEWING = 2
SHORTLISTED = 3
INTERVIEWING = 4
ADVANCED_INTERVIEWING = 5
REJECTED = 6
OFFERED = 7
HIRED = 8
NEXT_STAGE = {
PENDING: [REVIEWING, SHORTLISTED],
REVIEWING: [REJECTED],
SHORTLISTED: [INTERVIEWING],
INTERVIEWING: [ADVANCED_INTERVIEWING, REJECTED, OFFERED],
ADVANCED_INTERVIEWING: [REJECTED, OFFERED],
OFFERED: [HIRED, REJECTED],
REJECTED: [],
HIRED: []
}
STAGE_TO_TASKS = {
PENDING: {
"description": "Checked with hiring manager whether he/she has reviewed the application?",
"deadline": 7
},
REVIEWING: {
"description": "Have you waited a few days yet?",
"deadline": 7
},
SHORTLISTED: {
"description": "Have you checked with candidate whether he has gone for the interview?",
"deadline": 7
},
INTERVIEWING: {
"description": "Have you checked with candidate on the status of his interview?",
"deadline": 7
},
ADVANCED_INTERVIEWING: {
"description": "Have you checked with candidate on the status of his advanced interview?",
"deadline": 7
},
OFFERED: {
"description": "Have you checked with candidate whether he has taken up the offer?",
"deadline": 7
}
}
categories = [
(PENDING, "Pending"),
(REVIEWING, "Reviewing"),
(SHORTLISTED, "Shortlisted"),
(INTERVIEWING, "Interviewing"),
(ADVANCED_INTERVIEWING, "Advanced Interviewing"),
(REJECTED, "Rejected"),
(OFFERED, "Offered"),
(HIRED, "Hired"),
]
user = models.ForeignKey('custom_user.User', on_delete=models.CASCADE, null=False, blank=False, related_name='user')
recruiter = models.ForeignKey('custom_user.User', on_delete=models.SET_NULL, null=True, blank=True,
related_name='recruiter')
email = models.EmailField(max_length=254)
job = models.ForeignKey('job_applications.Job', on_delete=models.CASCADE, null=False, blank=False)
stage = models.IntegerField(
choices=categories,
)
resume = models.FileField(
upload_to=upload_to_file,
validators=[validate_file],
help_text="Please upload only PDF or docx files",
)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __repr__(self):
return self.__str__()
def __str__(self):
return f"<Application Username: {self.user.username} / JobId: {self.job.pk} / Stage: {self.stage}>"
@property
def possible_next_stages(self) -> list:
"""
        retrieves the possible next stages the application can move into
"""
return Application.NEXT_STAGE[self.stage]
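    # Hedged usage sketch (illustrative only, not part of the original model):
    # an application with stage == Application.INTERVIEWING has
    # application.possible_next_stages == [ADVANCED_INTERVIEWING, REJECTED, OFFERED],
    # i.e. [5, 6, 7], which a view could use to validate a requested stage change.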
| 2.328125 | 2 |
API/dataloader.py | gaozhangyang/DecST | 2 | 12795834 | from .dataloader_traffic import load_data as load_BJ
from .dataloader_human import load_data as load_human
from .dataloader_moving_mnist import load_data as load_mmnist
from .dataloader_kth import load_data as load_kth
from .dataloader_kitticaltech import load_data as load_kitticaltech
def load_data(dataname,batch_size, val_batch_size, data_root, require_back=False, pre_seq_length=None, aft_seq_length=None):
if dataname == 'traffic':
return load_BJ(batch_size, val_batch_size, data_root, require_back)
elif dataname == 'human':
return load_human(batch_size, val_batch_size, data_root, require_back)
elif dataname == 'mmnist':
return load_mmnist(batch_size, val_batch_size, data_root, require_back)
elif dataname == 'kth':
return load_kth(batch_size, val_batch_size, data_root, pre_seq_length, aft_seq_length)
elif dataname == 'kitticaltech':
return load_kitticaltech(batch_size, val_batch_size, data_root, pre_seq_length, aft_seq_length) | 2.1875 | 2 |
special_k/check_gpg_keys/verify_expiry.py | namoopsoo/special_k | 0 | 12795835 | # Copyright 2020-present Kensho Technologies, LLC.
import logging
import sys
import click
import gpg
from ..signing import (
DAYS_WARNING_FOR_KEY_EXPIRATION,
add_trusted_keys_to_gpg_home_dir,
get_days_until_expiry,
)
from ..utils import get_temporary_directory
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def check_gpg_key_expiry(
days_warning_for_key_expiration: int = DAYS_WARNING_FOR_KEY_EXPIRATION,
) -> bool:
"""Check key expirations
Args:
days_warning_for_key_expiration: warn if a key expires within this number of days.
Defaults to 30.
Returns:
True if no keys are soon to expire or already expired, False otherwise
"""
with get_temporary_directory() as gpg_homedir:
add_trusted_keys_to_gpg_home_dir(gpg_homedir)
with gpg.Context(home_dir=gpg_homedir) as ctx:
fpr_to_expiry = get_days_until_expiry(ctx)
no_keys_close_to_expiry = True
for fpr, days_to_expiry in fpr_to_expiry.items():
if days_to_expiry < 0:
no_keys_close_to_expiry = False
action_message = "KEY IS EXPIRED!"
elif days_to_expiry < days_warning_for_key_expiration:
no_keys_close_to_expiry = False
action_message = "UPDATE KEY ASAP!!!!"
else:
action_message = "OK for now, but stay tuned"
logger.info(
"Key (FPR: %s) expires in %s days. %s", fpr, days_to_expiry, action_message
)
return no_keys_close_to_expiry
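# Hedged usage sketch (not part of the original module): callers outside the CLI
# could use the check directly, e.g.
#   if not check_gpg_key_expiry(days_warning_for_key_expiration=60):
#       logger.warning("a trusted key is expired or expires within 60 days")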
@click.command()
@click.argument("days_before_warning", required=False)
def main(days_before_warning) -> None:
"""Log info about when GPG keys will expire"""
    # click passes the optional argument as a string (or None when omitted)
    days = int(days_before_warning) if days_before_warning else DAYS_WARNING_FOR_KEY_EXPIRATION
    no_keys_close_to_expiry = check_gpg_key_expiry(days)
if no_keys_close_to_expiry:
sys.exit(0)
sys.exit(1)
| 2.3125 | 2 |
stylegan2/utils.py | moritztng/stylegan2-pytorch | 0 | 12795836 | from os import remove
from os.path import isdir, join
from pathlib import Path
from gdown import download
from zipfile import ZipFile
def download_ffhq(path):
path = join(path, 'ffhq')
if not isdir(path):
Path(path).mkdir(parents=True, exist_ok=True)
path_zip = join(path, 'ffhq.zip')
download(id='1EL0pQnON0SFOY8XXn8DX4T6cIcKf4CNu', output=path_zip)
with ZipFile(path_zip, 'r') as f:
f.extractall(path)
remove(path_zip)
def set_requires_grad(module, requires_grad):
for p in module.parameters():
p.requires_grad = requires_grad
return module
| 2.59375 | 3 |
search/forms.py | pkimber/search | 0 | 12795837 | <gh_stars>0
# -*- encoding: utf-8 -*-
from django import forms
from haystack.forms import SearchForm
class MySearchForm(SearchForm):
"""
Search records (exclude 'deleted' records by default)...
For form information... see 'Creating your own form':
http://django-haystack.readthedocs.org/en/latest/views_and_forms.html#creating-your-own-form
"""
deleted = forms.BooleanField(required=False)
def search(self):
# First, store the SearchQuerySet received from other processing.
sqs = super().search()
if not self.is_valid():
return self.no_query_found()
# Check to see if a deleted was ticked.
deleted = self.cleaned_data['deleted']
if deleted:
# return all records
pass
else:
# exclude deleted records
sqs = sqs.exclude(deleted=1)
return sqs
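    # Hedged usage sketch (not part of the original form; 'q' is haystack's
    # default query field):
    #   form = MySearchForm({'q': 'smith', 'deleted': False})
    #   results = form.search()   # excludes records flagged as deleted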
| 2.640625 | 3 |
setup.py | kalekundert/TestSuite | 0 | 12795838 | <reponame>kalekundert/TestSuite
import distutils.core
# Uploading to PyPI
# =================
# $ python setup.py register -r pypi
# $ python setup.py sdist upload -r pypi
version = '0.0'
distutils.core.setup(
name='finalexam',
version=version,
author='<NAME>',
url='https://github.com/kalekundert/finalexam',
download_url='https://github.com/kalekundert/finalexam/tarball/'+version,
license='LICENSE.txt',
description="A simple unit testing framework.",
long_description=open('README.rst').read(),
keywords=['unit', 'testing', 'pythonic', 'library'],
py_modules=['finalexam'],
requires=['nonstdlib'],
)
| 1.21875 | 1 |
main.py | feng-y16/pinn_cavity | 0 | 12795839 | import pdb
import time
import lib.tf_silent
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import Normalize
from matplotlib.gridspec import GridSpec
import os
import pickle
import argparse
from lib.pinn import PINN
from lib.network import Network
from lib.optimizer import Optimizer
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--maxiter', type=int, default=2000)
parser.add_argument('-ntr', '--num-train-samples', type=int, default=10000)
parser.add_argument('-nte', '--num-test-samples', type=int, default=100)
parser.add_argument('-n', '--network', type=str, default='pinn')
parser.add_argument('-l', '--loss', type=str, default='l2')
parser.add_argument('-gi', '--gradient-interval', type=int, default=100)
parser.add_argument('--gt-path', type=str, default='data/pinn.pkl')
return parser.parse_known_args()[0]
def uv(network, xy):
"""
Compute flow velocities (u, v) for the network with output (psi, p).
Args:
xy: network input variables as ndarray.
Returns:
(u, v) as ndarray.
"""
xy = tf.constant(xy)
with tf.GradientTape() as g:
g.watch(xy)
psi_p = network(xy)
psi_p_j = g.batch_jacobian(psi_p, xy)
u = psi_p_j[..., 0, 1]
v = -psi_p_j[..., 0, 0]
return u.numpy(), v.numpy()
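# Hedged usage sketch (shapes are assumptions, not from the original script):
# for a trained `network` mapping (x, y) -> (psi, p), `uv` differentiates the
# stream function, giving u = d(psi)/dy and v = -d(psi)/dx, e.g.
#   u_s, v_s = uv(network, np.random.rand(4, 2).astype('float32'))  # two arrays of shape (4,)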
def contour(grid, x, y, z, title, levels=50):
"""
Contour plot.
Args:
grid: plot position.
x: x-array.
y: y-array.
z: z-array.
title: title string.
levels: number of contour lines.
"""
# get the value range
vmin = -2e-1
vmax = 2e-1
if (title == 'psi'):
vmax = 1.2e-1
vmin = -1e-1
if (title == 'p'):
vmax = 6.1e-1
vmin = -5e-1
if (title == 'u'):
vmax = 1.1e+0
vmin = -2e-1
if (title == 'v'):
vmax = 2.1e-1
vmin = -2e-1
if (title == 'dpsi'):
vmax = 1.1e-2
vmin = 0.0
if (title == 'dp'):
vmax = 4.1e-1
vmin = 0.0
if (title == 'du'):
vmax = 1.1e-1
vmin = 0.0
if (title == 'dv'):
vmax = 8.1e-2
vmin = 0.0
# plot a contour
plt.subplot(grid)
print(title, vmin, vmax)
plt.contour(x, y, z, colors='k', linewidths=0.2, levels=levels, vmin=vmin, vmax=vmax)
plt.contourf(x, y, z, cmap='rainbow', levels=levels, vmin=vmin, vmax=vmax)
plt.title(title)
m = plt.cm.ScalarMappable(cmap='rainbow', norm=Normalize(vmin=vmin, vmax=vmax))
m.set_array(z)
m.set_clim(vmin, vmax)
cbar = plt.colorbar(m, pad=0.03, aspect=25, format='%.0e')
cbar.mappable.set_clim(vmin, vmax)
if __name__ == '__main__':
"""
    Test the physics-informed neural network (PINN) model
    for the cavity flow governed by the steady Navier-Stokes equations.
"""
args = parse_args()
# number of training samples
num_train_samples = args.num_train_samples
# number of test samples
num_test_samples = args.num_test_samples
# inlet flow velocity
u0 = 1
# density
rho = 1
# viscosity
nu = 0.01
# build a core network model
network = Network().build()
network.summary()
# build a PINN model
model = PINN(network, rho=rho, nu=nu).build()
# create training input
xy_eqn = np.random.rand(num_train_samples, 2)
xy_ub = np.random.rand(num_train_samples//2, 2) # top-bottom boundaries
xy_ub[..., 1] = np.round(xy_ub[..., 1]) # y-position is 0 or 1
xy_lr = np.random.rand(num_train_samples//2, 2) # left-right boundaries
xy_lr[..., 0] = np.round(xy_lr[..., 0]) # x-position is 0 or 1
xy_bnd = np.random.permutation(np.concatenate([xy_ub, xy_lr]))
x_train = [xy_eqn, xy_bnd]
# create training output
zeros = np.zeros((num_train_samples, 2))
uv_bnd = np.zeros((num_train_samples, 2))
uv_bnd[..., 0] = u0 * np.floor(xy_bnd[..., 1])
y_train = [zeros, zeros, uv_bnd]
# train the model using L-BFGS-B algorithm
optimizer = Optimizer(model=model, x_train=x_train, y_train=y_train, dict_params=args.__dict__)
optimizer.fit()
# create meshgrid coordinates (x, y) for test plots
x = np.linspace(0, 1, num_test_samples)
y = np.linspace(0, 1, num_test_samples)
x, y = np.meshgrid(x, y)
xy = np.stack([x.flatten(), y.flatten()], axis=-1)
# predict (psi, p)
psi_p = network.predict(xy, batch_size=len(xy))
psi, p = [ psi_p[..., i].reshape(x.shape) for i in range(psi_p.shape[-1]) ]
# compute (u, v)
u, v = uv(network, xy)
u = u.reshape(x.shape)
v = v.reshape(x.shape)
if os.path.isfile(args.gt_path):
with open(args.gt_path, 'rb') as f:
data = pickle.load(f)
x_gt, y_gt, psi_gt, p_gt, u_gt, v_gt = data
fig = plt.figure(figsize=(6, 5))
gs = GridSpec(2, 2)
contour(gs[0, 0], x, y, np.abs(psi - psi_gt), 'dpsi')
contour(gs[0, 1], x, y, np.abs(p - p_gt), 'dp')
contour(gs[1, 0], x, y, np.abs(u - u_gt), 'du')
contour(gs[1, 1], x, y, np.abs(v - v_gt), 'dv')
plt.tight_layout()
plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) +
'_error.png'))
plt.show()
plt.close()
fig = plt.figure(figsize=(6, 5))
gs = GridSpec(2, 2)
contour(gs[0, 0], x, y, psi, 'psi')
contour(gs[0, 1], x, y, p, 'p')
contour(gs[1, 0], x, y, u, 'u')
contour(gs[1, 1], x, y, v, 'v')
plt.tight_layout()
plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '.png'))
plt.show()
plt.close()
else:
# plot test results
fig = plt.figure(figsize=(6, 5))
gs = GridSpec(2, 2)
contour(gs[0, 0], x, y, psi, 'psi')
contour(gs[0, 1], x, y, p, 'p')
contour(gs[1, 0], x, y, u, 'u')
contour(gs[1, 1], x, y, v, 'v')
data = [x, y, psi, p, u, v]
with open(args.gt_path, 'wb') as f:
pickle.dump(data, f)
plt.tight_layout()
plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '.png'))
plt.show()
plt.close()
| 2.15625 | 2 |
mondrian_kernel.py | matejbalog/mondrian-kernel | 10 | 12795840 | <reponame>matejbalog/mondrian-kernel
import heapq
import numpy as np
import scipy.sparse
from sklearn import linear_model
import sys
import time
from utils import sample_cut, errors_regression
def evaluate_all_lifetimes(X, y, X_test, y_test, M, lifetime_max, delta,
validation=False, mondrian_kernel=False, mondrian_forest=False, weights_from_lifetime=None):
"""
    Sweeps through Mondrian kernels with all lifetimes in [0, lifetime_max]. This can be used to (1) construct a Mondrian
feature map with lifetime lifetime_max, to (2) find a suitable lifetime (inverse kernel width), or to (3) compare
Mondrian kernel to Mondrian forest across lifetimes.
:param X: training inputs
:param y: training regression targets
:param X_test: test inputs
:param y_test: test regression targets
:param M: number of Mondrian trees
:param lifetime_max: terminal lifetime
:param delta: ridge regression regularization hyperparameter
:param validation: flag indicating whether a validation set should be created by halving the test set
:param mondrian_kernel: flag indicating whether mondrian kernel should be evaluated
:param mondrian_forest: flag indicating whether mondrian forest should be evaluated
:param weights_from_lifetime: lifetime at which forest and kernel learned weights should be saved
:return: dictionary res containing all results
"""
N, D = np.shape(X)
N_test = np.shape(X_test)[0]
X_all = np.array(np.r_[X, X_test])
N_all = N + N_test
if mondrian_forest or mondrian_kernel:
y = np.squeeze(y)
y_test = np.squeeze(y_test)
# subtract target means
y_mean = np.mean(y)
y_train = y - y_mean
# start timer
time_start = time.clock()
# initialize sparse feature matrix
indptr = range(0, M * N_all + 1, M)
indices = range(M) * N_all
data = np.ones(N_all * M) / np.sqrt(M)
Z_all = scipy.sparse.csr_matrix((data, indices, indptr), shape=(N_all, M))
feature_from_repetition = range(M)
C = M
# bounding box for all datapoints used to sample first cut in each tree
feature_data = [np.array(range(N_all)) for _ in range(M)]
lX = np.min(X_all, 0)
uX = np.max(X_all, 0)
# event = tuple (time, tree, feature, dim, loc), where feature is the index of feature being split
events = []
active_features = []
active_features_in_tree = [[] for _ in range(M)]
for m in range(M):
cut_time, dim, loc = sample_cut(lX, uX, 0.0)
if cut_time < lifetime_max:
heapq.heappush(events, (cut_time, m, m, dim, loc))
active_features.append(m)
active_features_in_tree[m].append(m)
# iterate through birth times in increasing order
list_times = []
list_runtime = []
if mondrian_forest:
w_trees = [np.zeros(1) for _ in range(M)]
trees_y_hat_train = np.zeros((N, M)) # initialize Mondrian tree predictions and squared errors
trees_y_hat_test = np.zeros((N_test, M))
list_forest_error_train = []
list_forest_error_test = []
if mondrian_kernel:
w_kernel = np.zeros(M)
w_kernel_save = np.zeros(M)
list_kernel_error_train = []
if validation:
list_kernel_error_validation = []
list_kernel_error_test = []
while len(events) > 0:
(birth_time, m, c, dim, loc) = heapq.heappop(events)
list_times.append(birth_time)
# construct new feature
Xd = X_all[feature_data[c], dim]
feature_l = (feature_data[c])[Xd <= loc]
feature_r = (feature_data[c])[Xd > loc]
feature_data.append(feature_l)
feature_data.append(feature_r)
active_features.remove(c)
active_features_in_tree[m].remove(c)
active_features.append(C + 0)
active_features.append(C + 1)
active_features_in_tree[m].append(C + 0)
active_features_in_tree[m].append(C + 1)
# move datapoints from split feature to child features
Z_all.indices[feature_l * M + m] = C + 0
Z_all.indices[feature_r * M + m] = C + 1
Z_all = scipy.sparse.csr_matrix((Z_all.data, Z_all.indices, Z_all.indptr), shape=(N_all, C + 2), copy=False)
# sample the cut for each child
lX_l = np.min(X_all[feature_l, :], axis=0)
uX_l = np.max(X_all[feature_l, :], axis=0)
cut_time_l, dim_l, loc_l = sample_cut(lX_l, uX_l, birth_time)
lX_r = np.min(X_all[feature_r, :], axis=0)
uX_r = np.max(X_all[feature_r, :], axis=0)
cut_time_r, dim_r, loc_r = sample_cut(lX_r, uX_r, birth_time)
# add new cuts to heap
if cut_time_l < lifetime_max:
heapq.heappush(events, (cut_time_l, m, C + 0, dim_l, loc_l))
if cut_time_r < lifetime_max:
heapq.heappush(events, (cut_time_r, m, C + 1, dim_r, loc_r))
feature_from_repetition.append(m)
feature_from_repetition.append(m)
C += 2
if mondrian_forest:
# update Mondrian forest predictions in tree m
Z_train = Z_all[:N, active_features_in_tree[m]]
Z_test = Z_all[N:, active_features_in_tree[m]]
w_tree = np.linalg.solve(np.transpose(Z_train).dot(Z_train) + delta / M * np.identity(len(active_features_in_tree[m])),
np.transpose(Z_train).dot(y_train))
if weights_from_lifetime is not None and birth_time <= weights_from_lifetime:
w_trees[m] = w_tree / np.sqrt(M)
trees_y_hat_train[:, m] = np.squeeze(Z_train.dot(w_tree))
trees_y_hat_test[:, m] = np.squeeze(Z_test.dot(w_tree))
# update Mondrian forest error
y_hat_train = y_mean + np.mean(trees_y_hat_train, 1)
y_hat_test = y_mean + np.mean(trees_y_hat_test, 1)
error_train, error_test = errors_regression(y, y_test, y_hat_train, y_hat_test)
list_forest_error_train.append(error_train)
list_forest_error_test.append(error_test)
# update Mondrian kernel predictions
if mondrian_kernel:
w_kernel = np.append(w_kernel, [w_kernel[c], w_kernel[c]])
w_kernel[c] = 0
Z_train = Z_all[:N]
Z_test = Z_all[N:]
SGD_epochs = 1
clf = linear_model.SGDRegressor(alpha=delta, fit_intercept=False, n_iter=SGD_epochs)
clf.fit(Z_train, y_train, coef_init=w_kernel)
w_kernel = clf.coef_
if weights_from_lifetime is not None and birth_time <= weights_from_lifetime:
w_kernel_save = np.array(w_kernel[active_features])
y_hat_train = y_mean + Z_train.dot(w_kernel)
y_hat_test = y_mean + Z_test.dot(w_kernel)
if validation:
error_train, error_validation =\
errors_regression(y, y_test[:(N_test/2)], y_hat_train, y_hat_test[:(N_test/2)])
error_train, error_test =\
errors_regression(y, y_test[(N_test/2):], y_hat_train, y_hat_test[(N_test/2):])
list_kernel_error_validation.append(error_validation)
else:
error_train, error_test = errors_regression(y, y_test, y_hat_train, y_hat_test)
list_kernel_error_train.append(error_train)
list_kernel_error_test.append(error_test)
# save runtime
list_runtime.append(time.clock() - time_start)
if mondrian_kernel:
# progress indicator in console
sys.stdout.write("\rTime: %.2E / %.2E (C = %d, test error = %.3f)" % (birth_time, lifetime_max, C, error_test))
sys.stdout.flush()
if mondrian_kernel:
sys.stdout.write("\n")
# this function returns a dictionary with all values of interest stored in it
results = {'times': list_times, 'runtimes': list_runtime, 'Z': Z_all, 'feature_from_repetition': np.array(feature_from_repetition)}
if mondrian_forest:
if weights_from_lifetime is not None:
results['w_forest'] = np.concatenate(w_trees)
results['w_kernel'] = w_kernel_save
results['forest_train'] = list_forest_error_train
results['forest_test'] = list_forest_error_test
if mondrian_kernel:
results['kernel_train'] = list_kernel_error_train
results['kernel_test'] = list_kernel_error_test
if validation:
results['kernel_validation'] = list_kernel_error_validation
return results
def Mondrian_kernel_features(X, lifetime, M):
res = evaluate_all_lifetimes(X, None, np.empty((0, X.shape[1])), None, M, lifetime, None)
Z = np.sqrt(M) * res['Z'] # undo normalization
return Z, res['feature_from_repetition']
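# Hedged usage sketch (toy sizes, not from the original experiments):
#   X_demo = np.random.rand(20, 3)
#   Z_demo, reps = Mondrian_kernel_features(X_demo, lifetime=2.0, M=5)
# Z_demo is a sparse (20, C) indicator feature matrix and reps maps each of the
# C features back to the Mondrian tree (repetition) that generated it.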
| 2.546875 | 3 |
2021/src/aoc2021/cli.py | Stannislav/Advent-of-Code | 2 | 12795841 | """The command line application for running the advent of code solutions."""
import argparse
import importlib
import pathlib
from typing import cast
from aoc2021.lib import ModSolution
def main() -> int:
"""Run the main CLI entry point."""
parser = argparse.ArgumentParser(
description="Run the advent of code puzzle solutions."
)
parser.add_argument("day", type=int, help="the day number")
parser.add_argument(
"-e",
"--extra",
action="store_true",
help="run the alternative community-based solution",
)
args = parser.parse_args()
# Read the input data
data_file = pathlib.Path(f"input/{args.day:02d}.txt")
if not data_file.exists():
print(f"Input data file not found: {data_file}")
return 1
with data_file.open() as fh:
raw_data = fh.read()
# Load the solution module
if args.extra:
submodule = "solutions_extra"
else:
submodule = "solutions"
module = f"aoc2021.{submodule}.day{args.day:02d}"
try:
mod_solution = cast(ModSolution, importlib.import_module(module))
except ModuleNotFoundError as exc:
print(exc)
return 1
# Get the solutions
part1, part2 = mod_solution.run(raw_data)
print(f"Part 1: {part1}")
print(f"Part 2: {part2}")
return 0
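# Hedged usage sketch (the console-script name "aoc2021" is an assumption about
# the packaging, not taken from this file):
#   $ aoc2021 7          # solves day 7 using input/07.txt and aoc2021.solutions.day07
#   $ aoc2021 7 --extra  # uses the community-based aoc2021.solutions_extra.day07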
| 3.71875 | 4 |
Python/jadoo_and_dna_transcription.py | Rounak259/HackerEarth | 0 | 12795842 | '''dna_rna = str()
nucleotides = {'A', 'C', 'G', 'T', 'U'}
complement = {'G':'C', 'C':'G', 'T':'A', 'A':'U'}
b = set(input())
if b.issubset(nucleotides):
for i in b:
if i in complement:
dna_rna = dna_rna+complement[i]
else:
dna_rna = dna_rna+i
print(dna_rna)
else:
print("Invalid Input")'''
a={'G':'C','C':'G','T':'A','A':'U'}
b=input()
try:print(''.join(a[i] for i in b))
except:print("Invalid Input") | 3.734375 | 4 |
bot/main.py | AlexGustafsson/irc-sentiment-bot | 0 | 12795843 | import csv
import logging
import random
from argparse import ArgumentParser
from irc import IRC
from irc.messages import IRCMessage
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
positives = [
"(˶‾᷄ ⁻̫ ‾᷅˵)",
"(っˆڡˆς)",
"♥‿♥",
"(づ。◕‿‿◕。)づ",
"٩( ๑╹ ꇴ╹)۶",
"ᕕ( ᐛ )ᕗ",
"٩(^‿^)۶",
"\(^O^)/"
]
negatives = [
"(ノ ゜Д゜)ノ ︵ ┻━┻",
"(;´༎ຶД༎ຶ`)",
"( ͡° ʖ̯ ͡°)",
"(ノಠ益ಠ)ノ彡┻━┻",
"t(ಠ益ಠt)",
"༼ ༎ຶ ෴ ༎ຶ༽",
"┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻"
]
def main() -> None:
"""Main entrypoint of the bot."""
# Configure the default logging format
logging.basicConfig(
format="[%(asctime)s] [%(levelname)-5s] %(message)s",
level=logging.INFO,
datefmt="%Y-%m-%d %H:%M:%S"
)
# Create an argument parser for parsing CLI arguments
parser = ArgumentParser(description="An IRC bot providing sentiment analysis and reactions using ASCII emojis")
# Add parameters for the server connection
parser.add_argument("-s", "--server", required=True, type=str, help="The server to connect to")
# Add optional parameters for the server connection
parser.add_argument("-p", "--port", default=6697, type=int, help="The port to connect to")
parser.add_argument("--use-tls", default=True, type=bool, help="Whether or not to use TLS")
parser.add_argument("-t", "--timeout", default=300, type=float, help="Connection timeout in seconds")
# Add optional parameters for authentication etc.
parser.add_argument("-u", "--user", default="sentiment-bot", help="Username to use when connecting to the IRC server")
parser.add_argument("-n", "--nick", default="sentiment-bot", help="Nick to use when connecting to the IRC server")
parser.add_argument("-g", "--gecos", default="Sentiment Bot v1.0.2 (github.com/AlexGustafsson/irc-sentiment-bot)")
parser.add_argument("-c", "--channel", required=True, action='append', help="Channel to join. May be used more than once")
# Parse the arguments
options = parser.parse_args()
# Create an IRC connection
irc = IRC(
options.server,
options.port,
options.user,
options.nick,
timeout=options.timeout,
use_tls=options.use_tls
)
irc.connect()
# Connect to specified channels
for channel in options.channel:
irc.join(channel)
# The last analyzed result
lastMessageValence = None
# Handle all messages
for message in irc.messages:
if not isinstance(message, IRCMessage):
continue
target = message.author if message.target == options.nick else message.target
if message.message == "{}: help".format(options.nick):
irc.send_message(target, "I perform a simple sentiment analysis on your messages and respond with emojis")
irc.send_message(target, "You can debug the sentiment analysis of the last message like so:")
irc.send_message(target, "{}: debug".format(options.nick))
elif message.message == "{}: debug".format(options.nick):
if lastMessageValence is not None:
compound = "compound: {}".format(lastMessageValence["compound"])
debug = ", ".join(["'{}': {}".format(text, valence) for text, valence in lastMessageValence["debug"]])
irc.send_message(target, "{}. {}".format(compound, debug))
else:
analyzer = SentimentIntensityAnalyzer()
scores = analyzer.polarity_scores(message.message)
if scores["compound"] >= 0.6:
irc.send_message(target, random.choice(positives))
lastMessageValence = scores
elif scores["compound"] <= -0.6:
irc.send_message(target, random.choice(negatives))
lastMessageValence = scores
if __name__ == "__main__":
main()
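# Hedged usage sketch (server and channel values are placeholders):
#   python bot/main.py --server irc.example.org --channel "#general"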
| 2.78125 | 3 |
source/io/dataloader.py | mwhitesi/notatum | 0 | 12795844 | import gzip
import pandas as pd
import numpy as np
import io
import os
import re
import torch
import torch.utils.data as data_utils
import subprocess
import zipfile
import zlib
from Bio import AlignIO
from Bio.SeqIO.FastaIO import FastaIterator, as_fasta
from Bio.Align.Applications import MuscleCommandline
class IndexTensorDataset:
"""
Identical to torch.utils.data.Dataset.TensorDataset, but __getitem__
also returns indices as last value in tuple
"""
def __init__(self, *tensors):
assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
self.tensors = tensors
def __getitem__(self, index):
t = [tensor[index] for tensor in self.tensors]
t.append(index)
return(tuple(t))
def __len__(self):
return self.tensors[0].size(0)
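    # Hedged usage sketch (tensor shapes are made up): wrapping X of shape (N, D)
    # and y of shape (N,) as IndexTensorDataset(X, y) makes dataset[3] return
    # (X[3], y[3], 3), so DataLoader batches also carry the original row indices.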
class GeneDataset:
"""
Container object that provides access to the PyTorch Dataset and
Dataloader objects needed for one experiment
"""
def __init__(self, data_file, batch_size, test_split, shuffle_dataset,
random_seed, validation_split=0):
# Load tensor data
data = torch.load(data_file)
dataset = IndexTensorDataset(data['X'], data['y'])
# Test / train split
dataset_size = len(dataset)
indices = list(range(dataset_size))
split = int(np.floor(test_split * dataset_size))
if shuffle_dataset:
np.random.seed(random_seed)
np.random.shuffle(indices)
train_indices, test_indices = indices[split:], indices[:split]
# Initialize Dataloaders
train_sampler = data_utils.SubsetRandomSampler(train_indices)
test_sampler = data_utils.SubsetRandomSampler(test_indices)
self.train_loader = data_utils.DataLoader(dataset,
batch_size=batch_size,
sampler=train_sampler)
self.test_loader = data_utils.DataLoader(dataset,
batch_size=batch_size,
sampler=test_sampler)
self.isolates = data['isolates']
def transform(input, output):
"""Snakemake function
Split and transform input data
"""
genesdf = pd.read_csv(input[1], index_col=0, header=0)
metadf = pd.read_csv(input[0])
all_isolates = metadf["Isolate"].to_numpy('U')
encoding = {
'S': 0,
'I': 0.5,
'R': 1
}
pattern = re.compile("(\w{3}).pt$")
for f in output:
m = pattern.match(f, len(f)-6)
d = m.group(1)
# print(d)
y = metadf[d]
omit = pd.isnull(y)
isolates = all_isolates[~omit]
y = y.loc[~omit]
X = genesdf.loc[isolates].to_numpy()
ylabels = np.array([ encoding[v] for v in y ])
# print(ylabels.shape)
# print(X.shape)
# print(isolates.shape)
# print(isolates[0])
# print(isolates.dtype)
y_tensor = torch.from_numpy(ylabels)
X_tensor = torch.from_numpy(X)
torch.save({'y': y_tensor, 'X': X_tensor, 'isolates': isolates}, f)
def align(fh, transl=True):
"""
Translate and align pangenome cluster fasta file
"""
align_exe = MuscleCommandline(
r'C:\Users\matthewwhiteside\workspace\b_ecoli\muscle\muscle3.8.31_i86win32.exe',
clwstrict=True)
# Align on stdin/stdout
proc = subprocess.Popen(str(align_exe),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
shell=False)
sequences = FastaIterator(fh)
inp = [ ">"+record.id+"\n"+str(record.translate(table="Bacterial").seq)+"\n" for record in sequences ]
inp = "".join(inp)
align, err = proc.communicate(input=inp)
return(align)
def decompress(zipf, transl=True):
"""
Decompress gzipped fasta files in zip archive
"""
with zipfile.ZipFile(zipf, "r") as zh:
i = 0
for z in zh.infolist():
if not z.is_dir():
print(z.filename)
gz = zh.read(z.filename)
fh = io.BytesIO(gz)
with gzip.open(fh, 'rb') as gz:
fn = gz.read()
yield fn.decode('utf-8')
if __name__ == "__main__":
for fn in decompress("data/raw/ecoli/pan_genome_sequences.zip"):
with io.StringIO(fn) as ifh:
with open('data/tmp/test.aln', 'w') as ofh:
ofh.write(align(ifh))
break
| 2.375 | 2 |
Step/Custom/Jenkins.py | TheNexusAvenger/Kubuntu-Helper-Scripts | 0 | 12795845 | <reponame>TheNexusAvenger/Kubuntu-Helper-Scripts
"""
TheNexusAvenger
Installs Jenkins and a separate Java version.
"""
import os
import shutil
from Step.Standard.DownloadArchive import downloadArchive
from Step.Standard.DownloadFile import getDownloadLocation
from Step.Standard.RunProcess import runProcess
def installJenkins():
# Install Jenkins.
if not os.path.exists("/etc/init.d/jenkins"):
runProcess(["apt", "install", "-y", "curl"])
os.system("curl -fsSL https://pkg.jenkins.io/debian-stable/jenkins.io.key | sudo tee /usr/share/keyrings/jenkins-keyring.asc > /dev/null")
os.system("echo deb [signed-by=/usr/share/keyrings/jenkins-keyring.asc] https://pkg.jenkins.io/debian-stable binary/ | sudo tee /etc/apt/sources.list.d/jenkins.list > /dev/null")
runProcess(["apt", "update"])
try:
runProcess(["apt", "install", "-y", "jenkins"])
except Exception:
# Ignore exceptions that are likely due to the wrong java version being used.
pass
# Set up GraalVM.
# Jenkins requires Java 11, while other programs require newer versions.
if not os.path.exists("/usr/lib/jvm/java-11-graal"):
downloadArchive("https://github.com/graalvm/graalvm-ce-builds/releases/download/vm-21.3.0/graalvm-ce-java11-linux-amd64-21.3.0.tar.gz", "tar.gz", "graalvm-11")
graalDownloadLocation = getDownloadLocation("graalvm-11")
graalDownloadLocation = os.path.join(graalDownloadLocation, os.listdir(graalDownloadLocation)[0])
shutil.copytree(graalDownloadLocation, "/usr/lib/jvm/java-11-graal")
# Add GraalVM to the Jenkins service file path and restart it.
with open("/etc/init.d/jenkins") as file:
jenkinsService = file.read()
if "/usr/lib/jvm/java-11-graal" not in jenkinsService:
# Modify the file.
jenkinsService = jenkinsService.replace("PATH=/bin:", "PATH=/usr/lib/jvm/java-11-graal/bin:/bin:")
with open("/etc/init.d/jenkins", "w") as file:
file.write(jenkinsService)
# Restart the service.
runProcess(["systemctl", "daemon-reload"])
runProcess(["systemctl", "restart", "jenkins"]) | 2.140625 | 2 |
flt/model.py | davegreenwood/face-landmark-tool | 4 | 12795846 | """construct landmark models """
import json
def read_json(fname):
with open(fname) as fid:
data = json.load(fid)
return data
def write_json(model, fname):
with open(fname, "w") as fid:
json.dump(model, fid)
index = dict(
jaw=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
left_brow=[17, 18, 19, 20, 21],
right_brow=[22, 23, 24, 25, 26],
nose_bridge=[27, 28, 29, 30],
nose_lower=[31, 32, 33, 34, 35],
left_eye=[36, 37, 38, 39, 40, 41],
right_eye=[42, 43, 44, 45, 46, 47],
mouth_outer=[48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59],
mouth_inner=[60, 61, 62, 63, 64, 65, 66, 67],
left_pupil=[68],
right_pupil=[69])
keys = ["jaw", "left_brow", "right_brow", "nose_bridge", "nose_lower",
"left_eye", "right_eye", "mouth_outer", "mouth_inner"]
pos = [
# jaw
[219., 287.], [220., 334.], [227., 381.], [237., 428.], [254., 471.],
[284., 511.], [317., 542.], [357., 568.], [400., 576.], [444., 566.],
[482., 540.], [518., 507.], [546., 467.], [564., 424.], [571., 376.],
[576., 329.], [576., 281.],
# left_brow
[254., 252.], [274., 233.], [304., 227.], [335., 232.], [363., 244.],
# right_brow
[422., 241.], [450., 229.], [482., 223.], [512., 228.], [535., 245.],
# nose_bridge
[394., 277.], [394., 309.], [394., 341.], [395., 371.], [360., 392.],
# nose_lower
[377., 399.], [396., 404.], [414., 398.], [430., 391.],
# left_eye
[288., 283.], [307., 271.], [330., 271.], [348., 285.],
[329., 290.], [306., 290.],
# right_eye
[442., 283.], [459., 270.], [482., 269.], [501., 279.7],
[484., 287.], [462., 288.],
# mouth_outer
[328., 448.], [353., 437.], [378., 434.], [396., 438.], [416., 432.],
[442., 437.], [468., 446.], [444., 472.], [419., 484.], [398., 488.],
[379., 486.], [353., 475.],
# mouth_inner
[340., 451.], [378., 448.], [397., 450.], [415., 448.], [457., 449.],
[417., 462.], [397., 463.], [377., 460.],
# left_pupil
[319., 278.],
# right_pupil
[474., 277.]
]
model = dict(pos=pos, index=index, keys=keys)
if __name__ == "__main__":
write_json(model, "model.json")
| 2.859375 | 3 |
src/apps/calendar/models.py | creimers/graphene-advent | 0 | 12795847 | from django.db import models
import datetime
from easy_thumbnails.files import get_thumbnailer
from filer.fields.image import FilerImageField
import shortuuid
class Calendar(models.Model):
name = models.CharField(max_length=250)
uuid = models.CharField(max_length=22)
YEAR_CHOICES = [(r, r) for r in range(1984, datetime.date.today().year+1)]
year = models.IntegerField(
null=True, max_length=4, choices=YEAR_CHOICES,
default=datetime.datetime.now().year
)
def create_uuid(self):
return shortuuid.uuid()
def save(self, *args, **kwargs):
if not self.uuid:
self.uuid = self.create_uuid()
super(Calendar, self).save(*args, **kwargs)
def existing_days(self):
return self.days.all().count()
def __str__(self):
return self.name
class Day(models.Model):
class Meta:
unique_together = (('day', 'calendar'))
ordering = ['day', ]
calendar = models.ForeignKey(Calendar, related_name="days")
DAY_CHOICES = lambda x: [(i, '_' + str(i) + '_') for i in range(1, x + 1)]
day = models.IntegerField(choices=DAY_CHOICES(24))
image_source = models.URLField(blank=True)
original_image = FilerImageField(null=True)
def get_image_small_url(self):
# TODO: get these from the field
height = 400
width = 400
return get_thumbnailer(self.original_image.file).get_thumbnail({
'size': (width, height),
'crop': True,
'upscale': True,
'detail': True,
'subject_location': self.original_image.subject_location
}).url
def get_image_large_url(self):
# TODO: get these from the field
height = 1200
width = 1200
return get_thumbnailer(self.original_image.file).get_thumbnail({
'size': (width, height),
'crop': True,
'upscale': True,
'detail': True,
'subject_location': self.original_image.subject_location
}).url
def __str__(self):
return ' '.join([self.calendar.name, str(self.day)])
| 2.171875 | 2 |
gwent/common/Zone.py | shinoi2/gwent | 0 | 12795848 | <gh_stars>0
#!/usr/bin/python3
HAND = 0
HERO = 1
FIELD = 2
DECK = 3
GRAVEYARD = 4
REMOVED = 5
| 1.273438 | 1 |
src/Problem_7.py | BenjaminHb/WHU_CS_WebLearn | 0 | 12795849 | <filename>src/Problem_7.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# File Name: Problem_7.py
# Project Name: WebLearn
# Author: <NAME>
# Created Time: 2019-01-13 02:04
# Version: 0.0.1.20190113
#
# Copyright (c) <NAME> 2019
# All rights reserved.
#
if __name__ == '__main__':
while True:
try:
animal_no = int(input())
feed_time = []
total_time = 0
for i in range(8):
feed_time.append(list(map(lambda x: int(x), input().split(' '))))
for i in range(animal_no):
for j in range(7):
if feed_time[j][i] < feed_time[j + 1][i]:
temp = feed_time[j][i]
feed_time[j][i] = feed_time[j + 1][i]
feed_time[j + 1][i] = temp
total_time += feed_time[7][i]
print(total_time)
except:
break
| 3.078125 | 3 |
app/ctr/__init__.py | ihong9059/flasky | 0 | 12795850 | from flask import Blueprint
ctr = Blueprint('ctr', __name__)
from . import views
| 1.25 | 1 |