id (string, 1-7 chars) | text (string, 6-1.03M chars) | dataset_id (string, 1 distinct value)
---|---|---|
85243
|
<reponame>MadeInHaus/django-social
from django.contrib import admin
from ..models import (FacebookAccount, FacebookMessage, FacebookSearch,
TwitterAccount, TwitterMessage, TwitterSearch,
RSSAccount, RSSMessage, Message,
InstagramAccount, InstagramSearch, InstagramMessage,
TwitterSetting, FacebookSetting, InstagramSetting,
FacebookPublicAccount, TwitterPublicAccount, InstagramPublicAccount,
RSSSetting)
from .models import (SingletonAdmin, MessageAdmin, FacebookAccountAdmin, FacebookSearchAdmin,
FacebookMessageAdmin, TwitterAccountAdmin, TwitterPublicAccountAdmin,
TwitterMessageAdmin, TwitterSearchAdmin,
InstagramAccountAdmin, InstagramPublicAccountAdmin, InstagramSearchAdmin,
InstagramMessageAdmin, RSSAccountAdmin, RSSMessageAdmin,)
admin.site.register(Message, MessageAdmin)
admin.site.register(FacebookSetting, SingletonAdmin)
admin.site.register(FacebookAccount, FacebookAccountAdmin)
admin.site.register(FacebookPublicAccount, FacebookAccountAdmin)
admin.site.register(FacebookMessage, FacebookMessageAdmin)
admin.site.register(FacebookSearch, FacebookSearchAdmin)
admin.site.register(TwitterSetting, SingletonAdmin)
admin.site.register(TwitterAccount, TwitterAccountAdmin)
admin.site.register(TwitterPublicAccount, TwitterPublicAccountAdmin)
admin.site.register(TwitterMessage, TwitterMessageAdmin)
admin.site.register(TwitterSearch, TwitterSearchAdmin)
admin.site.register(InstagramSetting, SingletonAdmin)
admin.site.register(InstagramAccount, InstagramAccountAdmin)
admin.site.register(InstagramPublicAccount, InstagramPublicAccountAdmin)
admin.site.register(InstagramSearch, InstagramSearchAdmin)
admin.site.register(InstagramMessage, InstagramMessageAdmin)
admin.site.register(RSSSetting, SingletonAdmin)
admin.site.register(RSSAccount, RSSAccountAdmin)
admin.site.register(RSSMessage, RSSMessageAdmin)
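# Editor's note: the ModelAdmin classes registered above are imported from a
# sibling `.models` module that is not included in this sample. Purely for
# illustration, a minimal sketch of one such admin class might look like the
# following (attribute values are assumptions, not taken from the repository):
class MessageAdminSketch(admin.ModelAdmin):
    # hypothetical change-list configuration
    list_display = ('id',)
    ordering = ('-id',)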
|
StarcoderdataPython
|
1721102
|
from koala.sequence_id import SequenceId
_request_id = SequenceId()
_reentrant_id = SequenceId()
def set_request_id_seed(seed: int):
_request_id.set_seed(seed)
def new_request_id() -> int:
return _request_id.new_id()
def set_reentrant_id_seed(seed: int):
_reentrant_id.set_seed(seed)
def new_reentrant_id() -> int:
return _reentrant_id.new_id()
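# Editor's sketch (not from the koala package): a minimal stand-in with the
# SequenceId interface used above -- set_seed() and new_id() -- assuming a
# simple monotonically increasing counter. The real implementation may differ.
class _SequenceIdSketch:
    def __init__(self) -> None:
        self._value = 0

    def set_seed(self, seed: int) -> None:
        # continue counting from the given seed
        self._value = seed

    def new_id(self) -> int:
        # hand out the next id in the sequence
        self._value += 1
        return self._value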
|
StarcoderdataPython
|
79101
|
import warnings
from random import randint
from unittest import TestCase
from .models import (
ColumnFamilyTestModel,
ColumnFamilyIndexedTestModel,
ClusterPrimaryKeyModel,
ForeignPartitionKeyModel,
DictFieldModel
)
from .util import (
connect_db,
destroy_db,
create_model
)
class ColumnFamilyModelTestCase(TestCase):
def setUp(self):
self.connection = connect_db()
self.cached_rows = {}
'''
Let's create some simple data.
'''
create_model(
self.connection,
ColumnFamilyTestModel
)
field_names = [
field.name if field.get_internal_type() != 'AutoField' else None
for field in ColumnFamilyTestModel._meta.fields
]
field_values = ['foo', 'bar', 'raw', 'awk', 'lik', 'sik', 'dik', 'doc']
self.total_rows = 100
value_index = 0
for x in xrange(self.total_rows):
test_data = {}
for name in field_names:
if not name:
continue
test_data[name] = field_values[value_index % len(field_values)]
value_index += 1
test_data['field_1'] = test_data['field_1'] + str(
randint(1000, 9999)
)
if test_data['field_1'] in self.cached_rows.keys():
continue
created_instance = ColumnFamilyTestModel.objects.create(
**test_data
)
self.cached_rows[created_instance.pk] = created_instance
self.created_instances = len(self.cached_rows)
import django
django.setup()
def tearDown(self):
destroy_db(self.connection)
def test_token_partition_key_field_value_to_string(self):
first_instance = ColumnFamilyTestModel.objects.all()[:1][0]
token_field, _, _, _ = ColumnFamilyTestModel._meta.get_field_by_name(
'pk_token'
)
result = token_field.value_to_string(first_instance)
self.assertIsNotNone(result)
class ColumnFamilyTestIndexedQueriesTestCase(TestCase):
def setUp(self):
self.connection = connect_db()
self.cached_rows = {}
'''
Let's create some simple data.
'''
create_model(
self.connection,
ColumnFamilyIndexedTestModel
)
field_names = [
field.name if field.get_internal_type() != 'AutoField' else None
for field in ColumnFamilyIndexedTestModel._meta.fields
]
field_values = [
'foo',
'bar',
'raw',
'awk',
'lik',
'sik',
'dik',
'doc',
'dab'
]
high_cardinality_field_values = ['yes', 'no']
self.total_rows = 400
value_index = 0
for x in xrange(self.total_rows):
test_data = {}
for name in field_names:
if not name:
continue
test_data[name] = field_values[value_index % len(field_values)]
test_data['field_4'] = (
high_cardinality_field_values[
value_index % len(
high_cardinality_field_values
)
]
)
value_index += 1
test_data['field_1'] = test_data['field_1'] + str(
randint(1000, 9999)
)
if test_data['field_1'] in self.cached_rows.keys():
continue
created_instance = ColumnFamilyIndexedTestModel.objects.create(
**test_data
)
self.cached_rows[created_instance.pk] = created_instance
self.created_instances = len(self.cached_rows)
import django
django.setup()
def tearDown(self):
destroy_db(self.connection)
def test_partial_inefficient_get_query(self):
all_results = ColumnFamilyIndexedTestModel.objects.all()
all_results = [x for x in all_results]
last_result = all_results[-1]
last_result.field_3 = 'tool'
last_result_indexed_value = last_result.field_4
last_result.save()
partial_inefficient_get = (
ColumnFamilyIndexedTestModel.objects.get(
field_3='tool',
field_4=last_result_indexed_value
)
)
self.assertIsNotNone(partial_inefficient_get)
self.assertTrue(partial_inefficient_get.pk in self.cached_rows.keys())
class ForeignPartitionKeyModelTestCase(TestCase):
def setUp(self):
import django
django.setup()
self.connection = connect_db()
create_model(
self.connection,
ClusterPrimaryKeyModel
)
create_model(
self.connection,
ForeignPartitionKeyModel
)
def tearDown(self):
destroy_db(self.connection)
def test_order_by_efficient(self):
rel_instance = ClusterPrimaryKeyModel()
rel_instance.auto_populate()
rel_instance.save()
instances = []
for i in xrange(10):
instances.append(ForeignPartitionKeyModel.objects.create(
related=rel_instance
))
with warnings.catch_warnings(record=True) as w:
ordered_query = ForeignPartitionKeyModel.objects.filter(
related=rel_instance
).order_by('-created')
results = list(ordered_query)
self.assertEqual(
0,
len(w)
)
self.assertEqual(
10,
len(results)
)
for i in instances:
i.delete()
all_instances = ForeignPartitionKeyModel.objects.all()
self.assertEqual(
0,
len(all_instances)
)
class TestDictFieldModel(TestCase):
def setUp(self):
import django
django.setup()
self.connection = connect_db()
create_model(
self.connection,
DictFieldModel
)
def tearDown(self):
destroy_db(self.connection)
def test_creation(self):
instance = DictFieldModel.objects.create(
parameters={'key0': 'value0', 'key1': 'value1'}
)
self.assertIsNotNone(instance)
|
StarcoderdataPython
|
3230009
|
import time
#start = time.perf_counter()
import tensorflow as tf
import argparse
import pickle
import os
from model import Model
from utils import build_dict, build_train_dataset, batch_iter
# Uncomment next 2 lines to suppress error and Tensorflow info verbosity. Or change logging levels
# tf.logging.set_verbosity(tf.logging.FATAL)
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def add_arguments(parser):
parser.add_argument("--num_hidden", type=int, default=150, help="Network size.")
parser.add_argument("--num_layers", type=int, default=2, help="Network depth.")
parser.add_argument("--beam_width", type=int, default=1, help="Beam width for beam search decoder.")
parser.add_argument("--glove", action="store_true", help="Use glove as initial word embedding.")
parser.add_argument("--embedding_size", type=int, default=300, help="Word embedding size.")
parser.add_argument("--learning_rate", type=float, default=1e-3, help="Learning rate.")
parser.add_argument("--batch_size", type=int, default=64, help="Batch size.")
parser.add_argument("--num_epochs", type=int, default=20, help="Number of epochs.")
parser.add_argument("--keep_prob", type=float, default=0.8, help="Dropout keep prob.")
parser.add_argument("--toy", action="store_true", help="Use only 50K samples of data")
parser.add_argument("--use_atten", action="store_true", help="Use only 50K samples of data")
parser.add_argument("--with_model", action="store_true", help="Continue from previously saved model")
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
with open("args.pickle", "wb") as f:
pickle.dump(args, f)
if not os.path.exists("saved_model"):
os.mkdir("saved_model")
else:
if args.with_model:
old_model_checkpoint_path = open('saved_model/checkpoint', 'r')
old_model_checkpoint_path = "".join(["saved_model/", old_model_checkpoint_path.read().splitlines()[0].split('"')[1] ])
print("Building dictionary...")
word_dict, reversed_dict, article_max_len, summary_max_len = build_dict("train", args.toy)
print("Loading training dataset...")
train_x, train_y = build_train_dataset(word_dict, article_max_len, summary_max_len)
with tf.Session() as sess:
model = Model(reversed_dict, article_max_len, summary_max_len, args)
if args.use_atten:
print ("Using Attention")
else:
print ("Not Using Attention")
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables())
if 'old_model_checkpoint_path' in globals():
print("Continuing from previous trained model:" , old_model_checkpoint_path , "...")
saver.restore(sess, old_model_checkpoint_path )
batches = batch_iter(train_x, train_y, args.batch_size, args.num_epochs)
num_batches_per_epoch = (len(train_x) - 1) // args.batch_size + 1
print("\nIteration starts.")
print("Number of batches per epoch :", num_batches_per_epoch)
for batch_x, batch_y in batches:
batch_x_len = list(map(lambda x: len([y for y in x if y != 0]), batch_x))
batch_decoder_input = list(map(lambda x: [word_dict["<s>"]] + list(x), batch_y))
batch_decoder_len = list(map(lambda x: len([y for y in x if y != 0]), batch_decoder_input))
batch_decoder_output = list(map(lambda x: list(x) + [word_dict["</s>"]], batch_y))
batch_decoder_input = list(
map(lambda d: d + (summary_max_len - len(d)) * [word_dict["<padding>"]], batch_decoder_input))
batch_decoder_output = list(
map(lambda d: d + (summary_max_len - len(d)) * [word_dict["<padding>"]], batch_decoder_output))
train_feed_dict = {
model.batch_size: len(batch_x),
model.X: batch_x,
model.X_len: batch_x_len,
model.decoder_input: batch_decoder_input,
model.decoder_len: batch_decoder_len,
model.decoder_target: batch_decoder_output
}
_, step, loss = sess.run([model.update, model.global_step, model.loss], feed_dict=train_feed_dict)
if step % 1000 == 0:
print("step {0}: loss = {1}".format(step, loss))
if step % num_batches_per_epoch == 0:
#hours, rem = divmod(time.perf_counter() - start, 3600)
#minutes, seconds = divmod(rem, 60)
saver.save(sess, "./saved_model/model.ckpt", global_step=step)
print(" Epoch {0}: Model is saved.".format(step // num_batches_per_epoch))
#"Elapsed: {:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds) , "\n")
|
StarcoderdataPython
|
1678825
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import register_distributed_operator_impl
from .common import is_elementwise_op
from ..utils import is_dim_shard
from ..utils import is_dim_replicate
from ..utils import is_valid_list_index
from ..utils import compute_compatible_dim_mapping
from ..utils import compute_compatible_dims_mapping
from ..utils import compute_compatible_and_update_dim_mapping
from ..dist_attribute import OperatorDistributedAttribute
from paddle.fluid import core, unique_name
from paddle.fluid.framework import _non_static_mode
from paddle.fluid.framework import Program, Parameter, Variable, program_guard
from paddle.fluid.data_feeder import check_variable_and_dtype, check_dtype
from paddle.distributed.fleet.meta_optimizers.common import OpRole, OP_ROLE_KEY, OP_ROLE_VAR_KEY
from ..process_group import new_process_group
from ..utils import _get_comm_group, _get_corresponding_rank
from .dist_default import DistributedDefaultImpl0
class DistributedElementwise(DistributedOperatorImplContainer):
def __init__(self, op_type):
super(DistributedElementwise, self).__init__(op_type)
register_distributed_operator_impl_container(
DistributedElementwise("elementwise"))
# Replicated Elementwise
class DistributedElementwiseImpl0(DistributedOperatorImpl):
def __init__(self, name):
super(DistributedElementwiseImpl0, self).__init__(name)
self._forward_implemented = False
self._backward_implemented = False
def is_input_compatible(self, dist_op):
op_desc = dist_op.serial_op.desc
if is_elementwise_op(op_desc.type()):
return True
else:
return False
def is_output_compatible(self, dist_op):
        op_desc = dist_op.serial_op.desc
if is_elementwise_op(op_desc.type()):
return True
else:
return False
def is_auto_compatible(self, dist_op):
op_desc = dist_op.serial_op.desc
op_dist_attr = dist_op.dist_attr
dims_mapping_list = []
input_arg_names = op_desc.input_arg_names()
max_dims_mapping_len = -1
for arg_name in input_arg_names:
dims_mapping = op_dist_attr.get_input_dims_mapping(arg_name)
if max_dims_mapping_len < len(dims_mapping):
max_dims_mapping_len = len(dims_mapping)
dims_mapping_list.append(dims_mapping)
output_arg_names = op_desc.output_arg_names()
for arg_name in output_arg_names:
dims_mapping = op_dist_attr.get_output_dims_mapping(arg_name)
assert len(dims_mapping) == max_dims_mapping_len
dims_mapping_list.append(dims_mapping)
for idx in range(max_dims_mapping_len):
dim_mappings = []
for dims_mapping in dims_mapping_list:
if idx < len(dims_mapping):
dim_mappings.append(dims_mapping[-(idx + 1)])
if not all(dim_mappings[0] == dim_mapping
for dim_mapping in dim_mappings):
return False
return True
def update_dims_mapping(self, dist_op):
changed = False
op_desc = dist_op.serial_op.desc
op_dist_attr = dist_op.dist_attr
input_arg_names = op_desc.input_arg_names()
input_dims_mapping_dict = {}
input_dims_mapping_lens = {}
max_dims_mapping_len = -1
for arg_name in input_arg_names:
dims_mapping = op_dist_attr.get_input_dims_mapping(arg_name)
if max_dims_mapping_len < len(dims_mapping):
max_dims_mapping_len = len(dims_mapping)
input_dims_mapping_dict[arg_name] = dims_mapping
input_dims_mapping_lens[arg_name] = len(dims_mapping)
dims_mapping_list = []
for arg_name in input_arg_names:
if input_dims_mapping_lens[arg_name] < max_dims_mapping_len:
new_dims_mapping = [-1 for _ in range(max_dims_mapping_len)]
for i in range(input_dims_mapping_lens[arg_name]):
new_idx = (max_dims_mapping_len -
input_dims_mapping_lens[arg_name]) + i
new_dims_mapping[new_idx] = input_dims_mapping_dict[
arg_name][i]
dims_mapping_list.append(new_dims_mapping)
else:
dims_mapping_list.append(input_dims_mapping_dict[arg_name])
output_arg_names = op_desc.output_arg_names()
for arg_name in output_arg_names:
dims_mapping = op_dist_attr.get_output_dims_mapping(arg_name)
assert len(dims_mapping) == max_dims_mapping_len
dims_mapping_list.append(dims_mapping)
compatible_dims_mapping = compute_compatible_dims_mapping(
dims_mapping_list)
assert compatible_dims_mapping is not None, "There is no compatible dim mapping."
for arg_name in input_arg_names:
if input_dims_mapping_lens[arg_name] < max_dims_mapping_len:
new_dims_mapping = [
-1 for _ in range(input_dims_mapping_lens[arg_name])
]
for i in range(input_dims_mapping_lens[arg_name]):
new_idx = (max_dims_mapping_len -
input_dims_mapping_lens[arg_name]) + i
new_dims_mapping[i] = compatible_dims_mapping[new_idx]
if new_dims_mapping != input_dims_mapping_dict[arg_name]:
op_dist_attr.set_input_dims_mapping(arg_name,
new_dims_mapping)
changed = True
else:
if compatible_dims_mapping != input_dims_mapping_dict[arg_name]:
op_dist_attr.set_input_dims_mapping(arg_name,
compatible_dims_mapping)
changed = True
for arg_name in output_arg_names:
dims_mapping = op_dist_attr.get_output_dims_mapping(arg_name)
if compatible_dims_mapping != dims_mapping:
op_dist_attr.set_output_dims_mapping(arg_name,
compatible_dims_mapping)
changed = True
return changed
@staticmethod
def forward(ctx, *args, **kwargs):
DistributedDefaultImpl0.forward(ctx, *args, **kwargs)
@staticmethod
def backward(ctx, *args, **kwargs):
DistributedDefaultImpl0.backward(ctx, *args, **kwargs)
register_distributed_operator_impl(
"elementwise", DistributedElementwiseImpl0("replicate_parallel"))
|
StarcoderdataPython
|
196446
|
from bplot import svgbase
from bplot import text
class Table(svgbase.Container):
def __init__(self, insert, size, matrix,
rows=None,
cols=None,
show_value=None,
show_rows=None,
show_cols=None,
value_pos='middle:center',
show_grid=None,
font_size=10,
cell_fill='none',
value_fill='black',
value_cmap=None,
cell_cmap=None,
**kwargs
):
self.matrix = matrix
self.rows = rows
self.cols = cols
self.show_value = show_value
self.value_pos = value_pos
self.show_rows = show_rows
self.show_cols = show_cols
self.show_grid = show_grid
self.font_size = font_size
self.value_fill = value_fill
self.cell_fill = cell_fill
self.value_cmap = value_cmap or []
self.cell_cmap = cell_cmap or []
self.ratio_x = ':'.join(['1']*len(self.matrix[0]))
self.ratio_y = ':'.join(['1']*len(self.matrix))
super(Table, self).__init__(insert, size,
**kwargs)
self._render()
if rows:
self._render_rows()
if cols:
self._render_cols()
def _render(self):
rows = self.split(split_ratio=(self.ratio_x, self.ratio_y))
for i, row in enumerate(rows):
for j, cell in enumerate(row):
if self.show_grid:
cell_stroke = 'black'
else:
cell_stroke = 'white'
if self.cell_cmap:
cell_fill = self.cell_cmap[i][j]
else:
cell_fill = self.cell_fill
rect = svgbase.Rect((0, 0), cell.size,
fill=cell_fill, stroke=cell_stroke)
cell.svgobj.add(rect.svgobj)
cell.rect = rect
v = self.matrix[i][j]
if self.value_cmap:
value_fill = self.value_cmap[i][j]
else:
value_fill = self.value_fill
if self.show_value:
self._set_label(cell, v,
show_value=self.show_value,
value_pos=self.value_pos,
fill=value_fill)
self.svgobj.add(cell.svgobj)
def _render_rows(self):
width = 10
for row in self.rows:
w = text.measure_text(''.join(str(x) for x in row), self.font_size) + 2*len(row)
if w > width:
width = w
x, y = self.insert
_, height = self.size
insert = x - width, y
size = width, height
container = Table(insert, size, self.rows,
show_value=self.show_rows,
value_pos='iright:center',
font_size=self.font_size,
)
self.trows = container
def _render_cols(self):
height = self.font_size*2
x, y = self.insert
width, _ = self.size
insert = x, y - height
size = width, height
container = Table(insert, size, self.cols,
show_value=self.show_cols,
value_pos='middle:ibottom',
font_size=self.font_size,
)
self.tcols = container
def _set_label(self, cell, v,
show_value=None,
value_pos='',
fill='black',
):
try:
v = round(v, 2)
except Exception:
pass
if show_value == 'latex':
name = v.replace('\\', '').replace('{', '').replace('}', '')
size = cell.size[0]*2/3, cell.size[1]*2/3
label = text.LatexText(v, name, (0, 0), size,
font_size=self.font_size, fill='black',
anchor_object=cell.rect, anchor_pos=value_pos,
)
cell.svgobj.add(label.svgobj)
return
if show_value == 'sign':
if v > 0:
v = '+'
elif v < 0:
v = '-'
else:
v = '0'
label = svgbase.Text(v,
anchor_object=cell.rect,
anchor_pos=value_pos,
fill=fill,
font_size=self.font_size,
)
cell.svgobj.add(label.svgobj)
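# Editor's sketch: a hypothetical use of Table. The svgbase.Container interface
# is not shown in this sample, so the call below is an assumption:
# table = Table((10, 10), (200, 100),
#               matrix=[[1, 2], [3, 4]],
#               show_value=True, show_grid=True)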
|
StarcoderdataPython
|
1682123
|
<reponame>grg121/headmouse<filename>camera_opencv.py
import sys
import keyboard
import cv2
import pyautogui
from base_camera import BaseCamera
import numpy as np
# multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascades
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
smile_cascade = cv2.CascadeClassifier('haarcascade_smile.xml')
pyautogui.FAILSAFE = False
class Camera(BaseCamera):
video_source = 0
@staticmethod
def set_video_source(source):
Camera.video_source = source
@staticmethod
def frames():
def ClickDown(img,color):
cv2.rectangle(img,(0,0),(img.shape[1],img.shape[0]),color,15) # average
def Draw(obj, img, color):
x, y, w, h = obj
cv2.rectangle(img,(x,y),(x+w,y+h),color,2) # average
def CenterOf(obj):
x, y, w, h = obj
px = int(x+w/2)
py = int(y+h/2)
return px,py
camera = cv2.VideoCapture(Camera.video_source)
if not camera.isOpened():
raise RuntimeError('Could not start camera.')
_, img = camera.read()
shape = img.shape
center_x = int(shape[1]/2)
center_y = int(shape[0]/2)
x = y = 0
rate = 15
sensibility = 5
face_buffer = []
while len(face_buffer) < rate:
face_buffer.append([0,0,0,0])
while True:
# read current frame
_, img = camera.read()
img = cv2.flip( img, 1 )
grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
detected_faces = face_cascade.detectMultiScale(grayscale, 1.4, 5)
if len(detected_faces) > 0:
current_face = max(detected_faces, key=lambda x:x[2]*x[3]) # bigger face detected
face_buffer.append(current_face)
# keep buffer size to rate
del face_buffer[0]
Draw(current_face, img, (0,255,255))
x,y,w,h = current_face
roi_gray = grayscale[y+int(h/2):y+h, x:x+w]
roi_color = img[y+int(h/2):y+h, x:x+w]
average_face = np.mean(face_buffer, axis=0) # get average parameters of buffer faces
# x,y,w,h
Draw(average_face.astype('int'), img, (0,0,255))
px,py = CenterOf(average_face)
h, w,_ = img.shape
cv2.line(img,(int(w/2),0),(int(w/2),h),(255,123,0),2)
cv2.line(img,(0,int(h/2)),(w,int(h/2)),(255,123,0),2)
cv2.circle(img,(px,py), 10, (0,255,255), -1)
desp_x = int((center_x-px)/sensibility)
desp_y = int((center_y-py)/sensibility)
font = cv2.FONT_HERSHEY_SIMPLEX
# pyautogui.moveRel(-desp_x, -desp_y, duration=0)
vel = 10
if desp_x > 0:
desp_x = vel
if desp_x < 0:
desp_x = -vel
pyautogui.moveRel(-desp_x, 0 , duration=0)
zoom = 1
zoom_limit = 5
if desp_y > zoom_limit :
pyautogui.scroll(zoom)
if desp_y < -zoom_limit :
pyautogui.scroll(-zoom)
# encode as a jpeg image and return it
yield cv2.imencode('.jpg', img)[1].tobytes()
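# Editor's note: frames() yields JPEG-encoded frames; BaseCamera (not shown in
# this sample) presumably consumes them to serve an MJPEG stream, e.g. wrapping
# each yielded byte string in a multipart/x-mixed-replace response. Horizontal
# displacement of the averaged face from the frame centre moves the mouse, and
# vertical displacement beyond zoom_limit scrolls.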
|
StarcoderdataPython
|
1717821
|
import vdomr as vd
from .viewcontainer import ViewContainer
from .forestviewcontrolpanel import ForestViewControlPanel
import uuid
import multiprocessing
import traceback
import sys
import time
from mountaintools import client as mt
# MEDIUM TODO move tabs between north/south containers
# MEDIUM TODO cross-correlograms widget
class ForestViewMainWindow(vd.Component):
def __init__(self, context):
vd.Component.__init__(self)
self._context = context
self._control_panel = ForestViewControlPanel(self._context)
self._view_container_north = ViewContainer()
self._view_container_south = ViewContainer()
self._view_containers = [self._view_container_north, self._view_container_south]
self._control_panel.onLaunchView(self._trigger_launch_view)
self._current_view_container = self._view_container_north
self._view_container_north.onClick(self._on_click_north)
self._view_container_south.onClick(self._on_click_south)
# style0 = dict(border='solid 1px gray')
self._container_CP = Container(self._control_panel, scroll=True)
self._container_VCN = Container(self._view_container_north)
self._container_VCS = Container(self._view_container_south)
self._container_main = Container(self._container_CP, self._container_VCN, self._container_VCS, position_mode='relative')
self._highlight_view_containers()
self._size = (1200, 800)
vd.devel.loadBootstrap()
self._update_sizes()
def setSize(self, size):
self._size = size
self._update_sizes()
def size(self):
return self._size
def _update_sizes(self):
width = self._size[0]
# width1 = int(min(300, width*0.3))
width1 = 320
width2 = width - width1 - 30
height = self._size[1]
height1 = int(height / 2) - 5
height2 = height - height1 - 30
self._container_main.setSize((width, height))
self._container_main.setPosition((0, 0))
self._container_CP.setSize((width1, height - 20))
self._container_CP.setPosition((10, 10))
self._container_VCN.setSize((width2, height1))
self._container_VCN.setPosition((width1 + 20, 10))
self._container_VCS.setSize((width2, height2))
self._container_VCS.setPosition((width1 + 20, height1 + 20))
self._view_container_north.setSize((width2, height1))
self._view_container_south.setSize((width2, height2))
def _highlight_view_containers(self):
for VC in [self._view_container_north, self._view_container_south]:
VC.setHighlight(VC == self._current_view_container)
def _on_click_north(self):
self._current_view_container = self._view_container_north
self._highlight_view_containers()
def _on_click_south(self):
self._current_view_container = self._view_container_south
self._highlight_view_containers()
def _trigger_launch_view(self, view_launcher):
if not view_launcher.get('always_open_new', False):
for VC in self._view_containers:
v = VC.findView(name=view_launcher.get('name', ''))
if v:
VC.setCurrentView(v)
return
frame = ViewFrame(view_launcher=view_launcher)
self._current_view_container.addView(frame, name=view_launcher.get('name', ''))
frame.initialize()
def context(self):
return self._context
def render(self):
return self._container_main
class TextComponent(vd.Component):
def __init__(self):
vd.Component.__init__(self)
self._text = ''
def setText(self, txt):
if self._text == txt:
return
self._text = txt
self.refresh()
def render(self):
return vd.span(self._text)
class TitleBar(vd.Component):
def __init__(self):
vd.Component.__init__(self)
self._title = 'TitleBar'
self._height = 20
def setTitle(self, title0):
if title0 == self._title:
return
self._title = title0
self.refresh()
def height(self):
return self._height
def render(self):
return vd.div(self._title, style={'height': '{}px'.format(self._height), 'font-size': '14px'})
class ViewFrame(vd.Component):
def __init__(self, *, view_launcher):
vd.Component.__init__(self)
self._view_launcher = view_launcher
self._connection_to_prepare = None
self._prepare_log_text = ''
self._prepare_log_text_view = TextComponent()
self._title_bar = TitleBar()
self._view = None
self._size = (100, 100)
self._init_process = None
self.updateTitle()
def setSize(self, size):
self._size = size
self._update_view_size()
def size(self):
return self._size
def tabLabel(self):
if self._view:
return self._view.tabLabel()
else:
return self._view_launcher['label'] + '...'
def updateTitle(self):
if self._view:
if hasattr(self._view, 'title'):
title0 = self._view.title()
else:
title0 = ''
else:
title0 = 'Preparing: {} ...'.format(self._view_launcher['label'])
self._title_bar.setTitle(title0)
def initialize(self):
view_launcher = self._view_launcher
context = view_launcher['context']
opts = view_launcher['opts']
view_class = view_launcher['view_class']
if hasattr(view_class, 'prepareView'):
self._connection_to_prepare, connection_to_parent = multiprocessing.Pipe()
self._init_process = multiprocessing.Process(
target=_prepare_in_worker,
args=(view_class, context, opts, connection_to_parent)
)
self._init_process.start()
self._check_prepare_count = 0
vd.set_timeout(self._check_prepare, 0.1)
else:
if hasattr(context, 'initialize'):
context.initialize()
self._view = view_class(context=context, opts=opts)
self._update_view_size()
self.updateTitle()
self.refresh()
def cleanup(self):
if self._init_process:
print('# terminating init process')
self._init_process.terminate()
if self._view:
if hasattr(self._view, 'cleanup'):
(getattr(self._view, 'cleanup'))()
def render(self):
if self._view:
X = self._view
else:
X = vd.components.ScrollArea(
vd.div(
vd.h3('Preparing...'),
vd.pre(self._prepare_log_text_view)
),
height=self._size[1] - self._title_bar.height()
)
return vd.div(self._title_bar, X)
def _update_view_size(self):
size = self._size
if self._view:
self._view.setSize((size[0], size[1] - self._title_bar.height()))
def _check_prepare(self):
if not self._view:
if self._connection_to_prepare.poll():
msg = self._connection_to_prepare.recv()
if msg['name'] == 'log':
self._prepare_log_text = self._prepare_log_text + msg['text']
self._prepare_log_text_view.setText(self._prepare_log_text)
elif msg['name'] == 'result':
self._on_prepare_completed(msg['result'])
return
self._check_prepare_count = self._check_prepare_count + 1
if self._check_prepare_count < 3:
timeout = 0.2
elif self._check_prepare_count < 5:
timeout = 0.5
elif self._check_prepare_count < 10:
timeout = 1
else:
timeout = 5
vd.set_timeout(self._check_prepare, timeout)
def _on_prepare_completed(self, result):
self._init_process = None
view_launcher = self._view_launcher
context = view_launcher['context']
opts = view_launcher['opts']
view_class = view_launcher['view_class']
if hasattr(context, 'initialize'):
context.initialize()
self._view = view_class(context=context, opts=opts, prepare_result=result)
self._update_view_size()
self.refresh()
class StdoutSender():
def __init__(self, connection):
self._connection = connection
self._handler = _StdoutHandler(connection)
def __enter__(self):
self._old_stdout = sys.stdout
self._old_stderr = sys.stderr
self._handler.setOtherStdout(self._old_stdout)
sys.stdout = self._handler
sys.stderr = self._handler
return dict()
def __exit__(self, exc_type, exc_val, exc_tb):
self._handler.send()
sys.stdout = self._old_stdout
sys.stderr = self._old_stderr
class _StdoutHandler(object):
def __init__(self, connection):
self._connection = connection
self._text = ''
self._timer = time.time()
self._other_stdout = None
def write(self, data):
if self._other_stdout:
self._other_stdout.write(data)
self._text = self._text + str(data)
elapsed = time.time() - self._timer
if elapsed > 5:
self.send()
self._timer = time.time()
def flush(self):
if self._other_stdout:
self._other_stdout.flush()
def setOtherStdout(self, other_stdout):
self._other_stdout = other_stdout
def send(self):
if self._text:
self._connection.send(dict(name="log", text=self._text))
self._text = ''
def _prepare_in_worker(view_class, context, opts, connection_to_parent):
# mt.setDownloadFromConfig(download_from_config)
with StdoutSender(connection=connection_to_parent):
try:
print('***** Preparing...')
result0 = view_class.prepareView(context=context, opts=opts)
except:
traceback.print_exc()
raise
connection_to_parent.send(dict(
name='result',
result=result0
))
class Container(vd.Component):
def __init__(self, *args, position=(0, 0), size=(0, 0), position_mode='absolute', style=dict(), scroll=False):
vd.Component.__init__(self)
self._elmt_id = 'Container-' + str(uuid.uuid4())
self._children = list(args)
self._position = position
self._size = size
self._position_mode = position_mode
self._style = style
self._scroll = scroll
def setSize(self, size):
js = """
document.getElementById('{elmt_id}').style.width='{width}px';
document.getElementById('{elmt_id}').style.height='{height}px';
"""
js = js.replace('{elmt_id}', self._elmt_id)
js = js.replace('{width}', str(size[0]))
js = js.replace('{height}', str(size[1]))
self.executeJavascript(js)
def setPosition(self, position):
js = """
document.getElementById('{elmt_id}').style.left='{left}px';
document.getElementById('{elmt_id}').style.top='{top}px';
"""
js = js.replace('{elmt_id}', self._elmt_id)
js = js.replace('{left}', str(position[0]))
js = js.replace('{top}', str(position[1]))
self.executeJavascript(js)
def render(self):
style = self._style
style['position'] = self._position_mode
style['width'] = '{}px'.format(self._size[0])
style['height'] = '{}px'.format(self._size[1])
style['left'] = '{}px'.format(self._position[0])
style['top'] = '{}px'.format(self._position[1])
if self._scroll:
style['overflow'] = 'auto'
else:
style['overflow'] = 'hidden'
ret = vd.div(
self._children,
style=style,
id=self._elmt_id
)
# if self._scroll:
# ret = vd.components.ScrollArea(ret, height=self._size[1])
return ret
|
StarcoderdataPython
|
158281
|
import click
from click.testing import CliRunner
from cthulhu.bin.cli import main
def test_cthulhu():
runner = CliRunner()
results = runner.invoke(main, ['--help'])
assert results.exit_code == 0
assert "Create a distributed test fixture for use on a Unix-like system." in results.output
|
StarcoderdataPython
|
172539
|
<reponame>mrh1997/headlock<gh_stars>1-10
"""
This is for headlock internal use only!
"""
from typing import Tuple, Callable
import abc
from collections.abc import ByteString
class MemoryManagementError(Exception):
"""
    This exception occurs when memory allocation/release fails.
"""
class AddressSpace:
"""
    An address space provides an interface to running C code at the ABI (not
    API!) layer.
This class is only the abstract base class. Descendants have to implement
the communication between this python process/machine and some specific
kind of "running code" (i.e. an OS user process, an OS kernel or an
embedded system running on a remote machine).
    This class is not responsible for instantiating C code or stopping it!
    The code has to be loaded somewhere else and a handle/ID has to be
    passed to the constructor of the subclass.
"""
def __init__(self):
self.bridgepool = {}
def _register_memory_block(self, address, len):
pass
@abc.abstractmethod
def find_memory_block(self, address:int) -> Tuple[int, int]:
"""
returns the start address and the length of the memory block which
contains this address. raises ValueError if no containing memory block
exists
"""
@abc.abstractmethod
def read_memory(self, address:int, length:int) -> bytes:
"""
Reads a specific amount of Memory (in bytes) of the address space.
The caller has to ensure that the specified memory range is valid,
otherwise the connected process could crash
"""
@abc.abstractmethod
def write_memory(self, address:int, data:ByteString):
"""
Writes a specific amount of Memory (in bytes) to the address space.
The caller has to ensure that the specified memory range is valid,
otherwise the connected process could crash
"""
@abc.abstractmethod
def alloc_memory(self, length:int) -> int:
"""
        Allocates length bytes of contiguous memory and returns a reference to
it.
"""
@abc.abstractmethod
def get_symbol_adr(self, symbol_name:str) -> int:
"""
returns the address of a specific symbol.
Symbol may be a global variable or a function.
"""
@abc.abstractmethod
def get_symbol_name(self, adr:int) -> str:
"""
        returns the name of a symbol or raises ValueError if adr does not
refer to a valid C symbol
"""
@abc.abstractmethod
def invoke_c_func(self, func_adr:int, c_sig:str,
args_adr:int, retval_adr:int) -> bytes:
"""
invokes a piece of C code via the bridge for signature of name
"c_sig".
"""
@abc.abstractmethod
def create_c_callback(self, c_sig:str,
pyfunc:Callable[[int, int], None]) -> int:
"""
Creates a new C function pointer of signature 'c_sig'.
        Every time this function is called, the call is bridged and
forwarded to pyfunc.
Returns the address of the created C callback.
"""
@abc.abstractmethod
def close(self):
"""
Close the connection to the addressspace.
"""
|
StarcoderdataPython
|
1771310
|
<filename>cptm/utils/topics.py
import pandas as pd
def get_top_topic_words(topics, opinions, t, top=10):
"""Return dataframe containing top topics and opinions.
Parameters
    t : str or int - the topic number (used as column index)
top : int - the number of words to store in the dataframe
Returns Pandas DataFrame
The DataFrame contains top topic words, weights of topic words and for
    each perspective opinion words and weights of opinion words.
"""
t = str(t)
topic = topics[t].copy()
topic.sort(ascending=False)
topic = topic[0:top]
df_t = pd.DataFrame(topic)
df_t.reset_index(level=0, inplace=True)
df_t.columns = ['topic', 'weights_topic']
dfs = [df_t]
for p, o in opinions.iteritems():
opinion = o[t].copy()
opinion.sort(ascending=False)
opinion = opinion[0:top]
df_o = pd.DataFrame(opinion)
df_o.reset_index(level=0, inplace=True)
df_o.columns = ['{}'.format(p),
'weights_{}'.format(p)]
dfs.append(df_o)
return pd.concat(dfs, axis=1)
def topic_str(df, single_line=False, weights=False, opinions=True):
if opinions:
opinion_labels = [l for l in df.columns if not l.startswith('weights')]
else:
opinion_labels = [l for l in df.columns if l.startswith('topic')]
if not single_line:
if not weights:
return str(df[opinion_labels])
else:
return str(df)
else:
lines = []
if not weights:
for l in opinion_labels:
lines.append(u'{}:\t'.format(l)+' '.join(df[l]))
else:
for l in opinion_labels:
zipped = zip(df[l], df['weights_{}'.format(l)])
line = [u'{}*{:.4f}'.format(wo, we) for wo, we in zipped]
lines.append(' '.join([u'{}:\t'.format(l)]+line))
return u'\n'.join(lines)
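# Editor's sketch: hypothetical use of the helpers above, assuming `topics` is a
# DataFrame of topic-word weights and `opinions` maps perspective names to such
# DataFrames, with topic numbers as (string) column labels:
# df = get_top_topic_words(topics, opinions, t=0, top=10)
# print(topic_str(df, single_line=True, weights=True))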
|
StarcoderdataPython
|
4805275
|
from delira._debug_mode import get_current_debug_mode, switch_debug_mode, \
set_debug_mode
from delira._backends import get_backends, seed_all
from ._version import get_versions as _get_versions
import warnings
warnings.simplefilter('default', DeprecationWarning)
warnings.simplefilter('ignore', ImportWarning)
__version__ = _get_versions()['version']
del _get_versions
|
StarcoderdataPython
|
1621381
|
from .kitti import KittiDataset
from .nuscenes import NuScenesDataset
from .lyft import LyftDataset
dataset_factory = {
"KITTI": KittiDataset,
"NUSC": NuScenesDataset,
"LYFT": LyftDataset,
}
def get_dataset(dataset_name):
return dataset_factory[dataset_name]
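# Editor's sketch: typical use of the factory above (the constructor arguments
# of the concrete dataset classes are not shown here and are assumptions):
# dataset_cls = get_dataset("KITTI")
# dataset = dataset_cls(...)   # e.g. data root, split, transforms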
|
StarcoderdataPython
|
1762932
|
<reponame>Mirmik/gxx
#!/usr/bin/env python3
#coding: utf-8
from licant.modules import submodule
from licant.cxx_modules import application
from licant.scripter import scriptq
import licant
scriptq.execute("../../../gxx.g.py")
application("target",
sources = ["main.cpp"],
mdepends = ["gxx"]
)
licant.ex(default = "target")
|
StarcoderdataPython
|
3220399
|
<gh_stars>0
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import traceback
from collections import deque
from threading import Condition, Thread
from time import time
""" Sits between incoming messages from a subscription, and the outgoing
publish method. Provides throttling / buffering capabilities.
When the parameters change, the handler may transition to a different kind
of handler
"""
class MessageHandler:
def __init__(self, previous_handler=None, publish=None):
if previous_handler:
self.last_publish = previous_handler.last_publish
self.throttle_rate = previous_handler.throttle_rate
self.queue_length = previous_handler.queue_length
self.publish = previous_handler.publish
else:
self.last_publish = 0
self.throttle_rate = 0
self.queue_length = 0
self.publish = publish
def set_throttle_rate(self, throttle_rate):
self.throttle_rate = throttle_rate / 1000.0
return self.transition()
def set_queue_length(self, queue_length):
self.queue_length = queue_length
return self.transition()
def time_remaining(self):
return max((self.last_publish + self.throttle_rate) - time(), 0)
def handle_message(self, msg):
self.last_publish = time()
self.publish(msg)
def transition(self):
if self.throttle_rate == 0 and self.queue_length == 0:
return self
elif self.queue_length == 0:
return ThrottleMessageHandler(self)
else:
return QueueMessageHandler(self)
def finish(self, block=True):
pass
class ThrottleMessageHandler(MessageHandler):
def handle_message(self, msg):
if self.time_remaining() == 0:
MessageHandler.handle_message(self, msg)
def transition(self):
if self.throttle_rate == 0 and self.queue_length == 0:
return MessageHandler(self)
elif self.queue_length == 0:
return self
else:
return QueueMessageHandler(self)
def finish(self, block=True):
pass
class QueueMessageHandler(MessageHandler, Thread):
def __init__(self, previous_handler):
Thread.__init__(self)
MessageHandler.__init__(self, previous_handler)
self.daemon = True
self.queue = deque(maxlen=self.queue_length)
self.c = Condition()
self.alive = True
self.start()
def handle_message(self, msg):
with self.c:
if not self.alive:
return
should_notify = len(self.queue) == 0
self.queue.append(msg)
if should_notify:
self.c.notify()
def transition(self):
if self.throttle_rate == 0 and self.queue_length == 0:
self.finish()
return MessageHandler(self)
elif self.queue_length == 0:
self.finish()
return ThrottleMessageHandler(self)
else:
with self.c:
old_queue = self.queue
self.queue = deque(maxlen=self.queue_length)
while len(old_queue) > 0:
self.queue.append(old_queue.popleft())
self.c.notify()
return self
def finish(self, block=True):
"""If throttle was set to 0, this pushes all buffered messages"""
# Notify the thread to finish
with self.c:
self.alive = False
self.c.notify()
if block:
self.join()
def run(self):
while self.alive:
msg = None
with self.c:
if len(self.queue) == 0:
self.c.wait()
else:
self.c.wait(self.time_remaining())
if self.alive and self.time_remaining() == 0 and len(self.queue) > 0:
msg = self.queue.popleft()
if msg is not None:
try:
MessageHandler.handle_message(self, msg)
except Exception:
traceback.print_exc(file=sys.stderr)
while self.time_remaining() == 0 and len(self.queue) > 0:
try:
                MessageHandler.handle_message(self, self.queue.popleft())
except Exception:
traceback.print_exc(file=sys.stderr)
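# Editor's sketch (not part of the original module): how these handlers are
# typically driven. Changing a parameter returns a (possibly different) handler
# instance, and messages are always fed to whatever was last returned. With the
# 500 ms throttle below, only some messages may be published before finish().
def _handler_demo_sketch(messages):
    received = []
    handler = MessageHandler(publish=received.append)
    handler = handler.set_throttle_rate(500)   # ms -> ThrottleMessageHandler
    handler = handler.set_queue_length(10)     # -> QueueMessageHandler (threaded)
    for msg in messages:
        handler.handle_message(msg)
    handler.finish()                           # stop and join the worker thread
    return received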
|
StarcoderdataPython
|
1708314
|
<gh_stars>0
import rpyc
class RobotService(rpyc.Service):
def __init__(self, robot):
super().__init__()
self.exposed_robot = robot
def on_connect(self, conn):
# code that runs when a connection is created
# (to init the service, if needed)
pass
def on_disconnect(self, conn):
# code that runs after the connection has already closed
# (to finalize the service, if needed)
pass
def restartSimulation(self):
self.exposed_robot.simulationReset()
from controller import Supervisor
from rpyc.utils.server import ThreadedServer
TIME_STEP = 8
robot = Supervisor()
service = RobotService(robot)
t = ThreadedServer(service, port=18861, protocol_config={'allow_public_attrs': True,})
t.start()
while robot.step(TIME_STEP) != -1:
pass
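# Editor's sketch: a client for the service above, run from another process.
# Host and port mirror the ThreadedServer call; everything else is an
# assumption:
# import rpyc
# conn = rpyc.connect("localhost", 18861, config={"allow_public_attrs": True})
# conn.root.robot.getTime()   # proxies through to the Webots Supervisor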
|
StarcoderdataPython
|
110836
|
<reponame>excelsimon/AI
# -*- coding:utf-8 -*-
import re
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
class LanguageDetector():
def __init__(self,classifier=MultinomialNB()):
self.classifier = classifier
self.vectorizer = CountVectorizer(
lowercase=True,
analyzer='char_wb',
ngram_range=(1,2),
max_features=1000,
preprocessor=self._remove_noise
)
def _remove_noise(self,document):
        noise_pattern = re.compile('|'.join([r'http\S+', r'\@\w+', r'\#\w+']))
clean_text = re.sub(noise_pattern, "", document)
return clean_text.strip()
def features(self,x):
return self.vectorizer.transform(x)
def fit(self,x,y):
self.vectorizer.fit(x)
self.classifier.fit(self.features(x),y)
def predict(self,x):
return self.classifier.predict(self.features([x]))
def score(self,x,y):
return self.classifier.score(self.features(x),y)
data_f = open('language_detector.csv')
lines = data_f.readlines()
data_f.close()
dataset = [(line.strip()[:-3],line.strip()[-2:]) for line in lines]
x, y = zip(*dataset)  # x and y are lists: x holds all sentences, y the matching labels
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3)
language_detector = LanguageDetector()
language_detector.fit(x_train,y_train)
print(language_detector.score(x_test,y_test))
print(language_detector.predict('This is an english sentence'))
"""
output:
0.977941176471
['en']
"""
|
StarcoderdataPython
|
132780
|
<gh_stars>1-10
from pytest import approx
def test_consult_for_daiweth(pool_daiweth_30bps, quanto_feed):
seconds_agos = [7200, 3600, 600, 0]
windows = [3600, 3600, 600]
now_idxs = [1, len(seconds_agos)-1, len(seconds_agos)-1]
tick_cums, secs_per_liq_cums = pool_daiweth_30bps.observe(seconds_agos)
actual_avg_ticks, actual_avg_liqs = quanto_feed.consult(pool_daiweth_30bps,
seconds_agos,
windows,
now_idxs)
# calculate expect arithmetic means for ticks and harmonic
# mean for liquidity to compare w actuals
for i in range(len(windows)):
expect_avg_tick = int((tick_cums[now_idxs[i]]-tick_cums[i])/windows[i])
expect_avg_liq = int(windows[i] * ((1 << 160) - 1) /
((secs_per_liq_cums[now_idxs[i]]
- secs_per_liq_cums[i]) << 32))
# rel=1e-4 is needed for rounding with ticks
assert approx(expect_avg_tick, rel=1e-4) == actual_avg_ticks[i]
assert approx(expect_avg_liq) == actual_avg_liqs[i]
def test_consult_for_uniweth(pool_uniweth_30bps, inverse_feed):
seconds_agos = [7200, 3600, 600, 0]
windows = [3600, 3600, 600]
now_idxs = [1, len(seconds_agos)-1, len(seconds_agos)-1]
tick_cums, secs_per_liq_cums = pool_uniweth_30bps.observe(seconds_agos)
actual_avg_ticks, actual_avg_liqs = inverse_feed.consult(
pool_uniweth_30bps, seconds_agos, windows, now_idxs)
# calculate expect arithmetic means for ticks and harmonic
# mean for liquidity to compare w actuals
for i in range(len(windows)):
expect_avg_tick = int((tick_cums[now_idxs[i]]-tick_cums[i])/windows[i])
expect_avg_liq = int(windows[i] * ((1 << 160) - 1) /
((secs_per_liq_cums[now_idxs[i]]
- secs_per_liq_cums[i]) << 32))
# rel=1e-4 is needed for rounding with ticks
assert approx(expect_avg_tick, rel=1e-4) == actual_avg_ticks[i]
assert approx(expect_avg_liq) == actual_avg_liqs[i]
|
StarcoderdataPython
|
183246
|
'''
Hackerrank: https://www.hackerrank.com/challenges/coin-change/problem
this solution will time out on many test cases;
however, it can recover all the combinations during computation
'''
import sys
from collections import Counter, defaultdict
sys.setrecursionlimit(10000)
memory = defaultdict(list)
def recur_getWays(n, c):
#print('enter: n='+str(n))
if n < 0:
return False, []
elif n == 0:
return True, [Counter()]
if n in memory.keys():
return True, memory[n]
all_combs_count = []
for coin in c:
#print('choose coin:'+str(coin))
valid, combs = recur_getWays(n-coin, c)
if valid:
for comb in combs:
comb = comb + Counter({coin:1})
existed = False
for existed_comb in all_combs_count:
if existed_comb == comb:
existed = True
break
if not existed:
all_combs_count.append(comb)
#print('memorize: n='+str(n)+' count:'+str(all_combs_count))
memory[n] = all_combs_count
return True, all_combs_count
def getWays(n, c):
# Complete this function
valid, combs = recur_getWays(n, c)
#print(memory)
return len(combs)
n, m = input().strip().split(' ')
n, m = [int(n), int(m)]
c = list(map(int, input().strip().split(' ')))
# Print the number of ways of making change for 'n' units using coins having the values given by 'c'
ways = getWays(n, c)
print(ways)
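# Editor's note: a standard bottom-up DP counts the combinations without
# storing them and avoids the timeouts mentioned in the docstring. A sketch
# with the same input/output contract as getWays, added for illustration:
def getWays_dp_sketch(n, c):
    # ways[v] = number of ways to make value v with the coins seen so far
    ways = [0] * (n + 1)
    ways[0] = 1
    for coin in c:
        for v in range(coin, n + 1):
            ways[v] += ways[v - coin]
    return ways[n]
# e.g. getWays_dp_sketch(4, [1, 2, 3]) == 4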
|
StarcoderdataPython
|
1721712
|
<reponame>coolexplorer/slackbot-buffy<gh_stars>1-10
import logging
from constant.k8s_command import k8s_commands, k8s_sub_commands
logger = logging.getLogger(__name__)
class K8SParser:
def __init__(self, k8s, message):
self.k8s = k8s
self.message = message
self.command = self.message[0].lower()
self.k8s_command = self.message[1].lower()
self.k8s_sub_command = self.message[2].lower()
self.params = self.message[3:]
self.case_name = "{0}_{1}".format(self.k8s_command, self.k8s_sub_command)
self.namespace = None
def parse(self):
if self.k8s_command in k8s_commands and self.k8s_sub_command in k8s_sub_commands:
case = getattr(self, self.case_name, lambda: "case_default")
else:
logger.error(f'Invalid Command: {self.case_name}')
raise Exception('Invalid Command')
self._check_params()
return case()
def _check_params(self):
if '-n' in self.params:
index = self.params.index('-n')
self.namespace = self.params[index + 1]
def get_pods(self):
return self.k8s.get_pods(self.namespace)
def get_deploys(self):
return self.k8s.get_deployments(self.namespace)
def get_daemons(self):
return self.k8s.get_daemon_sets(self.namespace)
def get_states(self):
return self.k8s.get_stateful_sets(self.namespace)
def get_replicas(self):
return self.k8s.get_replica_sets(self.namespace)
def get_ns(self):
return self.k8s.get_namespaces()
def get_configmap(self):
return self.k8s.get_config_map(self.namespace)
def get_secret(self):
return self.k8s.get_secret(self.namespace)
def get_nodes(self):
return self.k8s.get_nodes()
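# Editor's sketch: how this parser is presumably driven from the bot, assuming
# a `k8s` wrapper object exposing the get_* methods called above and a message
# split into tokens (all names below are hypothetical):
# parser = K8SParser(k8s, ['buffy', 'get', 'pods', '-n', 'default'])
# pods = parser.parse()   # dispatches to k8s.get_pods('default')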
|
StarcoderdataPython
|
1634700
|
import skimage.draw as skd
import skimage.io as skio
import numpy as np
import h5py
import itertools
import random
from typing import List
from dataclasses import dataclass, field
def default_float(n=1,low=0.0,high=1.0):
if n == 1:
return field(default_factory = lambda: np.random.uniform(low, high) )
else:
return field(default_factory = lambda: np.random.uniform(low, high, n) )
@dataclass
class Shape:
color: List[float] = default_float(3)
size: float = default_float(low=.1,high=.9)
x: float = default_float()
y: float = default_float()
def gen(self, img_size):
raise NotImplementedError()
@dataclass
class RotatableShape(Shape):
rotation: float = default_float()
class Circle(Shape):
def render(self, img_size):
radius = int(self.size * 0.5 * min(img_size[0], img_size[1]))
r = int(self.y*img_size[0])
c = int(self.x*img_size[1])
return skd.circle(r, c, radius, shape=img_size[:2])
class CircleOutline(Shape):
def render(self, img_size):
radius = int(self.size * 0.5 * min(img_size[0], img_size[1]))
r = int(self.y*img_size[0])
c = int(self.x*img_size[1])
return skd.circle_perimeter(r, c, radius, shape=img_size[:2])
class Square(RotatableShape):
def render(self, img_size):
r = int(self.y*img_size[0])
c = int(self.x*img_size[1])
th = self.rotation * np.pi
hs = int(self.size * 0.5 * min(img_size[0], img_size[1]))
rr = [r-hs, r-hs, r+hs, r+hs]
cc = [c-hs, c+hs, c+hs, c-hs]
rr, cc = rotate_rc(rr, cc, th, r, c)
rr,cc = skd.polygon(rr, cc)
rr,cc = rr.flatten(), cc.flatten()
return rr, cc
@dataclass
class Rectangle(RotatableShape):
aspect: float = default_float(low=0.0,high=1.0)
def render(self, img_size):
r = int(self.y * img_size[0])
c = int(self.x * img_size[1])
th = self.rotation * np.pi
hs = int(self.size * 0.5 * min(img_size[0], img_size[1]))
rr = [r-hs, r-hs, r+hs, r+hs]
cc = [c-hs*self.aspect, c+hs*self.aspect, c+hs*self.aspect, c-hs*self.aspect]
rr, cc = rotate_rc(rr, cc, th, r, c)
rr,cc = skd.polygon(rr, cc)
rr,cc = rr.flatten(), cc.flatten()
return rr, cc
@dataclass
class Ellipse(RotatableShape):
aspect: float = default_float(low=0.0,high=1.0)
def render(self, img_size):
r = int(self.y * img_size[0])
c = int(self.x * img_size[1])
th = self.rotation * 2 * np.pi - np.pi
radius = int(self.size * 0.5 * min(img_size[0], img_size[1]))
rr,cc = skd.ellipse(r, c, radius, radius*self.aspect, rotation=th)
rr,cc = rr.flatten(), cc.flatten()
return rr,cc
class Triangle(RotatableShape):
def render(self, img_size):
r = int(self.y * img_size[0])
c = int(self.x * img_size[1])
th = self.rotation * 2 * np.pi
hw = int(self.size * 0.5 * min(img_size[0], img_size[1]))
hh = hw*np.sqrt(3) * 0.5
rr = [ r+hw, r, r-hw ]
cc = [ c-hh, c+hh, c-hh ]
rr, cc = rotate_rc(rr, cc, th, r, c)
rr,cc = skd.polygon(rr, cc)
rr,cc = rr.flatten(), cc.flatten()
return rr, cc
SHAPE_CHOICES = [ Circle, Triangle, Rectangle, Ellipse, Square ]
def render_shapes(shapes, img_size, bg=None):
img = np.zeros(img_size, dtype=np.float32)
if bg is None:
bg = np.random.random(3)
img[:,:,0] = bg[0]
img[:,:,1] = bg[1]
img[:,:,2] = bg[2]
for shape in shapes:
rr,cc = shape.render(img_size)
rr,cc = crop_rc(rr, cc, img_size)
img[rr,cc,:] = shape.color
return img
def random_shapes(shape, n_min, n_max):
n = np.random.randint(n_min, n_max+1)
shapes = [ random_shape(shape) for i in range(n) ]
shapes.sort(key=lambda s: s.size)
shapes = shapes[::-1]
return shapes
def random_shape(shape=None):
if shape is None:
shape = random.choice(SHAPE_CHOICES)
return shape()
def rotate_rc(rr,cc,th,r,c):
p = np.array([ rr,
cc,
np.ones(len(rr)) ])
T1 = np.array([[ 1, 0, -r],
[ 0, 1, -c],
[ 0, 0, 1 ]])
R = np.array([ [ np.cos(th), -np.sin(th), 0 ],
[ np.sin(th), np.cos(th), 0 ],
[ 0, 0, 1 ] ])
T2 = np.array([[ 1, 0, r ],
[ 0, 1, c ],
[ 0, 0, 1 ]])
T = np.dot(T2, np.dot(R, T1))
pt = np.dot(T, p)
pt = np.round(pt)
return pt[0].astype(int), pt[1].astype(int)
def crop_rc(rr, cc, img_size):
mask = (rr >= 0) & (rr < img_size[0]) & (cc >= 0) & (cc < img_size[1])
return rr[mask], cc[mask]
def render_shape_sets(n, shape, img_sizes, n_min, n_max, dtype=np.float32):
img_sets = [ np.zeros([n]+list(img_size), dtype=dtype) for img_size in img_sizes ]
for i in range(n):
shapes = random_shapes(shape, n_min, n_max)
for j in range(len(img_sizes)):
img_sets[j][i,:] = render_shapes(shapes, img_sizes[j])
return img_sets
if __name__ == "__main__":
shp = (512,512,3)
im = render_shape_sets(1, None, [shp,shp], 10, 10)
import matplotlib.pyplot as plt
plt.imshow(im[1][0])
plt.axis('off')
plt.show()
plt.savefig('test.png')
|
StarcoderdataPython
|
59966
|
<gh_stars>0
#################################
# Value-added revenue (katma değer ciro) view
#################################
try:
from tkinter import *
from tkinter import ttk
except ImportError:
    # fallback for Python 2
from Tkinter import *
import ttk
class kdc_View:
def __init__(self,ik):
self.status = {0:"Katma Değer Ciro Yerlerde!",1:"Katma Değer Ciro Normal",2:"Katma Değer Ciro Yüksek"}
self.ik = ik
self.main()
def request(self):
state = self.ik.calculate(tsm.get(), hmm.get(), bog.get(), sg.get(), sahg.get() )
if(state is not None):
state=str(state[0])+" TL "+self.status[state[1]]
durum.set(state)
def main(self):
        global tsm   # total sales amount (+)
        global hmm   # raw material cost (-)
        global bog   # maintenance and repair expenses (-)
        global sg    # shipping expenses (-)
        global sahg  # purchased services expenses (-)
global durum
root = Tk()
root.title("Katma Değer Ciro Hesabı")
mainframe = ttk.Frame(root, padding="3 3 12 12")
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)
tsm = IntVar()
hmm = IntVar()
bog = IntVar()
sg = IntVar()
sahg = IntVar()
durum = StringVar()
        # income
tsm_entry = ttk.Entry(mainframe, width=10, textvariable=tsm)
        # expenses
hmm_entry = ttk.Entry(mainframe, width=10, textvariable=hmm)
bog_entry = ttk.Entry(mainframe, width=10, textvariable=bog)
sg_entry = ttk.Entry(mainframe, width=10, textvariable=sg)
sahg_entry = ttk.Entry(mainframe, width=10, textvariable=sahg)
        # income labels
ttk.Label(mainframe, text="Toplam Satış Miktarı ").grid(column=1, row=1, sticky=W)
tsm_entry.grid(column=2, row=1, sticky=(W, E))
ttk.Label(mainframe, text="------------------------------").grid(column=1, row=2, sticky=(W,E))
ttk.Label(mainframe, text="Gider/Maliyetler").grid(column=2, row=2, sticky=(W,E))
ttk.Label(mainframe, text="------------------------------").grid(column=3, row=2, sticky=(W,E))
        # expense labels
ttk.Label(mainframe, text="Hammadde Maliyeti ").grid(column=1, row=3, sticky=W)
hmm_entry.grid(column=2, row=3, sticky=(W, E))
ttk.Label(mainframe, text="Bakım Onarım Giderleri ").grid(column=1, row=4, sticky=W)
bog_entry.grid(column=2, row=4, sticky=(W, E))
ttk.Label(mainframe, text="Sevkiyat Giderleri ").grid(column=1, row=5, sticky=W)
sg_entry.grid(column=2, row=5, sticky=(W, E))
ttk.Label(mainframe, text="Satın Alınan Hizmet Giderleri ").grid(column=1, row=6, sticky=W)
sahg_entry.grid(column=2, row=6, sticky=(W, E))
ttk.Button(mainframe, text="Hesapla", command=self.request).grid(column=3, row=7, sticky=W)
ttk.Label(mainframe, text="Durum :").grid(column=1, row=7, sticky=E)
ttk.Label(mainframe, textvariable=durum).grid(column=2, row=7, sticky=(W, E))
for child in mainframe.winfo_children():
child.grid_configure(padx=5, pady=5)
root.bind('<Return>', self.request)
root.mainloop()
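# Hedged usage sketch (added; not part of the original module). kdc_View expects
# an object whose calculate(tsm, hmm, bog, sg, sahg) returns a (value, status_code)
# pair with status_code in {0, 1, 2}; DummyCalculator below is an illustrative
# stand-in, not the real business-logic class.
if __name__ == "__main__":
    class DummyCalculator:
        def calculate(self, tsm, hmm, bog, sg, sahg):
            value = tsm - (hmm + bog + sg + sahg)
            return value, 2 if value > 100000 else (1 if value > 0 else 0)
    kdc_View(DummyCalculator())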
|
StarcoderdataPython
|
3325352
|
# multi_spect_reg_config.py
# Copyright (c) 2020, <NAME>, <NAME>, University of Nevada, Reno.
# All rights reserved.
import SimpleITK as sitk
import os
import configparser
import numpy as np
class camera_parameter_t():
def __init__(self, camera_name, fx, fy, cx, cy, dist_vect):
self.camera_name = camera_name
self.fx = fx
self.fy = fy
self.cx = cx
self.cy = cy
self.dist_vect = np.array(dist_vect)
self.camera_matrix = np.array([ [ fx, 0.0, cx],
[0.0, fy, cy],
[0.0, 0.0, 1.0]])
class reg_config_t:
def __init__(self, config_file_path, input_dataset_path=None, output_dataset_path=None, failure_dataset_path=None):
configDict = configparser.ConfigParser()
# read the config file
configDict.read(config_file_path)
# get the name of the fixed channel
self.fixed_channel_name = self.clean_string(configDict['REGISTRATION']['fixed_channel'])
self.ordered_channel_names = self.parse_list(configDict['REGISTRATION']['input_channel_order'], str)
# check that the fixed channel name is also in the ordered channel names
assert self.fixed_channel_name in self.ordered_channel_names
# make a list of the moving channel names (all channels except the fixed channel)
self.moving_channel_names = list(self.ordered_channel_names)
self.moving_channel_names.remove(self.fixed_channel_name)
# make a dictionary of the per-channel settings
self.channel_param_map_settings = {}
for moving_ch_name in self.moving_channel_names:
self.channel_param_map_settings[moving_ch_name] = str(configDict['REGISTRATION']["param_map_" + moving_ch_name])
# dataset processing config
self.channel_paths = {}
# if the config file specifies settings for dataset processing
if 'PATHS' in configDict.keys():
self.input_dataset_path = input_dataset_path or str(configDict['PATHS']["INPUT_DATASET_PATH"])
self.output_dataset_path = output_dataset_path or str(configDict['PATHS']["OUTPUT_DATASET_PATH"])
self.failure_dataset_path = failure_dataset_path or str(configDict['PATHS']["OUTPUT_FAILURE_PATH"])
# for each channel build the absolute path
for name in self.ordered_channel_names:
sub_dir_path = configDict['PATHS'][name+"_SUBDIR"]
sub_dir_path = os.path.join(self.input_dataset_path, sub_dir_path)
self.channel_paths[name] = sub_dir_path
# create a dictionary which maps image IDs to a dictionary which maps channel names to the path of the image for that channel
self.img_path_dict = {}
self.load_image_dict(self.channel_paths)
# get the list of image IDs
self.image_ids = list(self.img_path_dict.keys())
self.image_ids.sort()
# create output directories if they don't exist
if not os.path.exists(self.output_dataset_path):
os.mkdir(self.output_dataset_path)
if not os.path.exists(self.failure_dataset_path):
os.mkdir(self.failure_dataset_path)
if self.image_extension == ".tif":
for c in self.channel_paths.values():
output_path = os.path.join(self.output_dataset_path, os.path.split(c)[-1])
failure_path = os.path.join(self.failure_dataset_path, os.path.split(c)[-1])
if not os.path.exists(output_path):
os.mkdir(output_path)
if not os.path.exists(failure_path):
os.mkdir(failure_path)
# Load Per-Channel Registration Settings
self.param_map = {}
for ch_name in self.channel_param_map_settings:
p_map = self.channel_param_map_settings[ch_name]
self.param_map[ch_name] = {}
self.param_map[ch_name]['max_alignment_attempts'] = int(configDict[p_map]["max_alignment_attempts"])
self.param_map[ch_name]['metric_num_hist_bins'] = int(configDict[p_map]["metric_num_hist_bins"])
self.param_map[ch_name]['metric_mask_border_size'] = int(configDict[p_map]["metric_mask_border_size"])
self.param_map[ch_name]['metric_sampling_rate_per_level'] = self.parse_list(configDict[p_map]["metric_sampling_rate_per_level"], float)
self.param_map[ch_name]['opt_shrink_factors'] = self.parse_list(configDict[p_map]["opt_shrink_factors"], int)
self.param_map[ch_name]['opt_scale_sigmas'] = self.parse_list(configDict[p_map]["opt_scale_sigmas"], float)
self.param_map[ch_name]['opt_final_metric_min'] = float(configDict[p_map]["opt_final_metric_min"])
self.param_map[ch_name]['evol_epsilon'] = float(configDict[p_map]["evol_epsilon"])
self.param_map[ch_name]['evol_iterations'] = int(configDict[p_map]["evol_iterations"])
self.param_map[ch_name]['evol_init_rad'] = float(configDict[p_map]["evol_init_rad"])
self.param_map[ch_name]['evol_growth_fact'] = float(configDict[p_map]["evol_growth_fact"])
self.param_map[ch_name]['evol_shrink_fact'] = float(configDict[p_map]["evol_shrink_fact"])
# Load Per-Channel Camera Settings
self.camera_params = {}
for ch_name in self.ordered_channel_names:
self.load_cam_config(configDict, ch_name)
def load_image_dict(self, data_set_paths_dict):
self.img_path_dict = {}
for ch_name in self.ordered_channel_names:
file_list = os.listdir(data_set_paths_dict[ch_name])
# print("Ch %s found %i files"%(ch_name, len(file_list)))
# identify image type
if file_list[0].endswith(".jpg"):
self.image_extension = ".jpg"
elif file_list[0].endswith(".tif") or file_list[0].endswith(".tiff"):
self.image_extension = ".tif"
else:
print(f"Image file {file_list[0]} is not of a supported type. (.jpg, ,tif, .tiff)")
raise TypeError
for file_name in file_list:
img_id = int(list(file_name.split('_'))[1])
#if img_id == 1:
# print("Found ID 1 for channel : ", ch_name)
if img_id not in self.img_path_dict.keys():
self.img_path_dict[img_id] = {}
self.img_path_dict[img_id][ch_name] = os.path.join(data_set_paths_dict[ch_name], file_name)
# remove any images which don't have images for all channels
bad_image_ids = []
for img_id in self.img_path_dict:
if len(self.img_path_dict[img_id].keys()) != len(self.ordered_channel_names):
# print("Bad ID : %i, number of keys: %i, number of channels: %i"%(img_id, len(self.img_path_dict[img_id].keys()), len(self.ordered_channel_names)))
# print(list(self.img_path_dict[img_id].keys()))
bad_image_ids.append(img_id)
for img_id in bad_image_ids:
del self.img_path_dict[img_id]
# get the list of image IDs
self.image_ids = list(self.img_path_dict.keys())
def get_img_paths(self, id):
paths = self.img_path_dict[id]
path_list = []
for ch_name in self.ordered_channel_names:
path_list.append(paths[ch_name])
return path_list
def load_cam_config(self, configDict, ch_name):
cam_name = "CAM_" + str(ch_name)
try:
fx = float(configDict[cam_name]['fx'])
fy = float(configDict[cam_name]['fy'])
cx = float(configDict[cam_name]['cx'])
cy = float(configDict[cam_name]['cy'])
dist_vect = self.parse_list(configDict[cam_name]['dist_vect'], float)
self.camera_params[ch_name] = camera_parameter_t(ch_name, fx, fy, cx, cy, dist_vect)
except KeyError as e:
raise Exception("Error While Parsing Camera Configuration : Config file does not contain key : " + str(e))
def clean_string(self, in_string):
in_string = in_string.strip()
in_string = in_string.replace('\t', '')
in_string = in_string.replace(' ', '')
return in_string
def parse_list(self, in_string, d_type):
in_string = self.clean_string(in_string)
string_list = in_string.split(',')
result = []
for string in string_list:
result.append(d_type(string))
return result
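# Hedged usage sketch (added; not part of the original module). The config file
# name and dataset paths below are hypothetical placeholders: reg_config_t only
# needs an INI file laid out with the REGISTRATION / PATHS / CAM_* sections read above.
if __name__ == "__main__":
    cfg = reg_config_t("multi_spect_reg.ini",
                       input_dataset_path="/data/raw",
                       output_dataset_path="/data/registered",
                       failure_dataset_path="/data/failed")
    print(cfg.fixed_channel_name, cfg.moving_channel_names)
    print(cfg.camera_params[cfg.fixed_channel_name].camera_matrix)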
|
StarcoderdataPython
|
94690
|
import pandas as pd
df = pd.read_csv("data.csv")
df.head()
bools_distance = []
for distance in df.Distance:
if distance <= 100:
bools_distance.append(True)
else:
bools_distance.append(False)
temp_distance = pd.Series(bools_distance)
temp_distance.head()
distance = df[temp_distance]
distance.reset_index(inplace=True, drop=True)
distance.head()
bools_gravity = []
for gravity in df.Gravity:
if gravity <= 350 and gravity >= 150:
bools_gravity.append(True)
else:
bools_gravity.append(False)
gravity_support = pd.Series(bools_gravity)
gravity_support.head()
final_stars = df[gravity_support]
final_stars.head()
final_stars.shape
final_stars.reset_index(inplace=True, drop=True)
final_stars.head()
final_stars.to_csv("final_data.csv")
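# Hedged alternative (added): the same two filters expressed with vectorized
# boolean masks instead of per-row loops; assumes the same data.csv columns as above.
distance_vec = df[df.Distance <= 100].reset_index(drop=True)
final_stars_vec = df[df.Gravity.between(150, 350)].reset_index(drop=True)
print(final_stars_vec.shape)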
|
StarcoderdataPython
|
8351
|
<reponame>haltu/velmu-mpass-demo<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-20 08:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import parler.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AuthenticationSource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('auth_id', models.CharField(max_length=128)),
('icon_url', models.CharField(blank=True, max_length=2048, null=True)),
],
options={
'abstract': False,
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='AuthenticationSourceTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('title', models.CharField(max_length=2048)),
('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='mpass.AuthenticationSource')),
],
options={
'managed': True,
'db_table': 'mpass_authenticationsource_translation',
'db_tablespace': '',
'default_permissions': (),
'verbose_name': 'authentication source Translation',
},
),
migrations.CreateModel(
name='AuthenticationTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('tag_id', models.CharField(max_length=128)),
],
options={
'abstract': False,
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='AuthenticationTagTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('title', models.CharField(max_length=2048)),
('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='mpass.AuthenticationTag')),
],
options={
'managed': True,
'db_table': 'mpass_authenticationtag_translation',
'db_tablespace': '',
'default_permissions': (),
'verbose_name': 'authentication tag Translation',
},
),
migrations.AddField(
model_name='authenticationsource',
name='tags',
field=models.ManyToManyField(blank=True, to='mpass.AuthenticationTag'),
),
migrations.AlterUniqueTogether(
name='authenticationtagtranslation',
unique_together=set([('language_code', 'master')]),
),
migrations.AlterUniqueTogether(
name='authenticationsourcetranslation',
unique_together=set([('language_code', 'master')]),
),
]
|
StarcoderdataPython
|
3271122
|
import numpy as np
import torch
class UnNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, imgs):
"""
Args:
            imgs (Tensor): Normalized image batch of size (B, C, H, W).
        Returns:
            Tensor: Un-normalized image batch.
"""
        if imgs.size(1) == 1:
            tensor = imgs.mul_(self.std[0]).add_(self.mean[0])
        else:
            tensor = []
            for img in imgs:
                # un-normalize each channel in place: t = t * std + mean
                for t, m, s in zip(img, self.mean, self.std):
                    t.mul_(s).add_(m)
                tensor.append(img)
            tensor = torch.stack(tensor)
            # The normalize code -> t.sub_(m).div_(s)
        return tensor
def postprocess_image(imgs):
imgs = imgs.squeeze(0)
imgs = imgs.cpu().numpy()
imgs = np.transpose(imgs, [1, 2, 0])
return imgs
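# Hedged usage sketch (added; not part of the original module). The mean/std
# values are illustrative ImageNet-style statistics, not taken from this repository.
if __name__ == "__main__":
    unnorm = UnNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    batch = torch.rand(2, 3, 32, 32)               # fake normalized batch
    restored = unnorm(batch)
    print(postprocess_image(restored[:1]).shape)   # -> (32, 32, 3)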
|
StarcoderdataPython
|
1753414
|
# -*- coding:utf-8 -*-
mongo_dsn = 'mongodb://root:[email protected]/proxy?authSource=admin'
mongo_db = 'proxy'
# sites to crawl proxies from
crawl_web_site = ['ip66', 'ip181', 'xici']
debug = 1
|
StarcoderdataPython
|
1708907
|
<filename>Method/ontolearn/abstracts.py
import logging
import random
from abc import ABCMeta, abstractmethod, ABC
from typing import Set, List, Tuple, Iterable, TypeVar, Generic, ClassVar, Optional, Generator, SupportsFloat
import numpy as np
import torch
from owlapy.model import OWLClassExpression, OWLOntology
from owlapy.util import iter_count
from .data_struct import Experience
from .data_struct import PrepareBatchOfTraining, PrepareBatchOfPrediction
from .owlready2.utils import get_full_iri
from .utils import read_csv
# random.seed(0) # Note: a module should not set the seed
_N = TypeVar('_N') #:
class AbstractLearningProblem(metaclass=ABCMeta):
"""Abstract learning problem"""
__slots__ = 'kb'
kb: 'AbstractKnowledgeBase'
@abstractmethod
def __init__(self, knowledge_base: 'AbstractKnowledgeBase'):
"""create a new abstract learning problem
Args:
knowledge_base: the knowledge base
"""
self.kb = knowledge_base
class AbstractScorer(Generic[_N], metaclass=ABCMeta):
"""
An abstract class for quality functions.
"""
__slots__ = 'lp', 'applied'
name: ClassVar
def __init__(self, learning_problem: AbstractLearningProblem):
"""Create a new quality function
Args:
learning_problem: learning problem containing the ideal solution. the score function uses the learning
problem and the provided matching instances to calculate the quality score
"""
self.lp = learning_problem
self.applied = 0
@abstractmethod
def score(self, instances) -> Tuple[bool, Optional[float]]:
"""Quality score for a set of instances with regard to the learning problem
Args:
instances (set): instances to calculate a quality score for
Returns:
Tuple, first position indicating if the function could be applied, second position the quality value
in the range 0.0--1.0
"""
pass
def apply(self, node: 'AbstractNode', instances) -> bool:
"""Apply the quality function to a search tree node after calculating the quality score on the given instances
Args:
node: search tree node to set the quality on
instances (set): instances to calculate the quality for
Returns:
True if the quality function was applied successfully
"""
assert isinstance(node, AbstractNode)
from ontolearn.search import _NodeQuality
assert isinstance(node, _NodeQuality)
self.applied += 1
ret, q = self.score(instances)
if q is not None:
node.quality = q
return ret
def clean(self):
"""Reset the state of the quality function, for example statistic counters"""
self.applied = 0
class AbstractHeuristic(Generic[_N], metaclass=ABCMeta):
"""Abstract base class for heuristic functions.
Heuristic functions can guide the search process."""
__slots__ = 'applied'
applied: int
@abstractmethod
def __init__(self):
"""Create a new heuristic function"""
self.applied = 0
@abstractmethod
def apply(self, node: _N, instances=None):
"""Apply the heuristic on a search tree node and set its heuristic property to the calculated value
Args:
node: node to set the heuristic on
instances (set): set of instances covered by this node
"""
pass
@abstractmethod
def clean(self):
"""Reset the state of the heuristic function, for example statistic counters"""
self.applied = 0
_KB = TypeVar('_KB', bound='AbstractKnowledgeBase') #:
logger = logging.getLogger(__name__)
class BaseRefinement(Generic[_N], metaclass=ABCMeta):
"""
Base class for Refinement Operators.
    Let C, D \\in N_c where N_c is a finite set of concepts.
    * Proposition 3.3 (Complete and Finite Refinement Operators) [1]
    * ρ(C) = {C ⊓ T} ∪ {D \\| D is not empty AND D \\sqset C}
    * The operator is finite,
    * The operator is complete as given a concept C, we can reach an arbitrary concept D such that D is a subset of C.
    *) Theoretical Foundations of Refinement Operators [1].
    *) Defining a top-down refinement operator that is proper is crucial.
    4.1.3 Achieving Properness [1]
    *) Figure 4.1 [1] defines the refinement operator
    [1] Learning OWL Class Expressions
"""
__slots__ = 'kb'
kb: _KB
@abstractmethod
def __init__(self, knowledge_base: _KB):
"""Construct a new base refinement operator
Args:
knowledge_base: knowledge base to operate on
"""
self.kb = knowledge_base
@abstractmethod
def refine(self, *args, **kwargs) -> Iterable[OWLClassExpression]:
"""Refine a given concept
Args:
ce (OWLClassExpression): concept to refine
Returns:
new refined concepts
"""
pass
def len(self, concept: OWLClassExpression) -> int:
"""The length of a concept
Args:
concept: concept
Returns:
length of concept according to some metric configured in the knowledge base
"""
return self.kb.cl(concept)
class AbstractNode(metaclass=ABCMeta):
"""Abstract search tree node"""
__slots__ = ()
@abstractmethod
def __init__(self):
"""Create an abstract search tree node"""
pass
def __str__(self):
"""string representation of node, by default its internal memory address"""
addr = hex(id(self))
addr = addr[0:2] + addr[6:-1]
return f'{type(self)} at {addr}'
class AbstractOEHeuristicNode(metaclass=ABCMeta):
"""Abstract Node for the CELOEHeuristic heuristic function
This node must support quality, horizontal expansion (h_exp), is_root, parent_node and refinement_count
"""
__slots__ = ()
@property
@abstractmethod
def quality(self) -> Optional[float]:
pass
@property
@abstractmethod
def h_exp(self) -> int:
pass
@property
@abstractmethod
def is_root(self) -> bool:
pass
@property
@abstractmethod
def parent_node(self: _N) -> Optional[_N]:
pass
@property
@abstractmethod
def refinement_count(self) -> int:
pass
class AbstractConceptNode(metaclass=ABCMeta):
"""Abstract search tree node which has a concept"""
__slots__ = ()
@property
@abstractmethod
def concept(self) -> OWLClassExpression:
pass
class AbstractKnowledgeBase(metaclass=ABCMeta):
"""Abstract knowledge base"""
__slots__ = ()
thing: OWLClassExpression
@abstractmethod
def ontology(self) -> OWLOntology:
"""The base ontology of this knowledge base"""
pass
def describe(self) -> None:
"""Print a short description of the Knowledge Base to the info logger output"""
properties_count = iter_count(self.ontology().object_properties_in_signature()) + iter_count(
self.ontology().data_properties_in_signature())
logger.info(f'Number of named classes: {iter_count(self.ontology().classes_in_signature())}\n'
f'Number of individuals: {self.individuals_count()}\n'
f'Number of properties: {properties_count}')
@abstractmethod
def clean(self) -> None:
"""This method should reset any caches and statistics in the knowledge base"""
raise NotImplementedError
@abstractmethod
def individuals_count(self) -> int:
"""Total number of individuals in this knowledge base"""
pass
@abstractmethod
def individuals_set(self, *args, **kwargs) -> Set:
"""Encode an individual, an iterable of individuals or the individuals that are instances of a given concept
into a set.
Args:
arg (OWLNamedIndividual): individual to encode
arg (Iterable[OWLNamedIndividual]): individuals to encode
arg (OWLClassExpression): encode individuals that are instances of this concept
Returns:
encoded set representation of individual(s)
"""
pass
class LBLSearchTree(Generic[_N], metaclass=ABCMeta):
"""Abstract search tree for the Length based learner"""
@abstractmethod
def get_most_promising(self) -> _N:
"""Find most "promising" node in the search tree that should be refined next
Returns:
most promising search tree node
"""
pass
@abstractmethod
def add_node(self, node: _N, parent_node: _N):
"""Add a node to the search tree
Args:
node: node to add
parent_node: parent of that node
"""
pass
@abstractmethod
def clean(self):
"""Reset the search tree state"""
pass
@abstractmethod
def get_top_n(self, n: int) -> List[_N]:
"""Retrieve the best n search tree nodes
Args:
n: maximum number of nodes
Returns:
list of top n search tree nodes
"""
pass
@abstractmethod
def show_search_tree(self, root_concept: OWLClassExpression, heading_step: str):
"""Debugging function to print the search tree to standard output
Args:
root_concept: the tree is printed starting from this search tree node
heading_step: message to print at top of the output
"""
pass
@abstractmethod
def add_root(self, node: _N):
"""Add the root node to the search tree
Args:
node: root node to add
"""
pass
class AbstractDrill(ABC):
"""
Abstract class for Convolutional DQL concept learning
"""
def __init__(self, path_of_embeddings, reward_func, learning_rate=None,
num_episode=None, num_of_sequential_actions=None, max_len_replay_memory=None,
representation_mode=None, batch_size=1024, epsilon_decay=None, epsilon_min=None,
num_epochs_per_replay=None, num_workers=32):
# @TODO refactor the code for the sake of readability
self.instance_embeddings = read_csv(path_of_embeddings)
self.embedding_dim = self.instance_embeddings.shape[1]
self.reward_func = reward_func
assert reward_func
self.representation_mode = representation_mode
assert representation_mode in ['averaging', 'sampling']
# Will be filled by child class
self.heuristic_func = None
self.num_workers = num_workers
# constants
self.epsilon = 1
self.learning_rate = learning_rate
self.num_episode = num_episode
self.num_of_sequential_actions = num_of_sequential_actions
self.num_epochs_per_replay = num_epochs_per_replay
self.max_len_replay_memory = max_len_replay_memory
self.epsilon_decay = epsilon_decay
self.epsilon_min = epsilon_min
self.batch_size = batch_size
if self.learning_rate is None:
self.learning_rate = .001
if self.num_episode is None:
self.num_episode = 759
if self.max_len_replay_memory is None:
self.max_len_replay_memory = 1024
if self.num_of_sequential_actions is None:
self.num_of_sequential_actions = 10
if self.epsilon_decay is None:
self.epsilon_decay = .001
if self.epsilon_min is None:
self.epsilon_min = 0
if self.num_epochs_per_replay is None:
self.num_epochs_per_replay = 100
# will be filled
self.optimizer = None # torch.optim.Adam(self.model_net.parameters(), lr=self.learning_rate)
self.seen_examples = dict()
self.emb_pos, self.emb_neg = None, None
self.start_time = None
self.goal_found = False
self.experiences = Experience(maxlen=self.max_len_replay_memory)
def default_state_rl(self):
self.emb_pos, self.emb_neg = None, None
self.goal_found = False
self.start_time = None
@abstractmethod
def init_training(self, *args, **kwargs):
"""
Initialize training for a given E+,E- and K.
@param args:
@param kwargs:
@return:
"""
@abstractmethod
def terminate_training(self):
"""
Save weights and training data after training phase.
@return:
"""
def next_node_to_expand(self, t: int = None) -> AbstractNode:
"""
Return a node that maximizes the heuristic function at time t
@param t:
@return:
"""
if self.verbose > 1:
self.search_tree.show_search_tree(self.start_class, t)
return self.search_tree.get_most_promising()
def form_experiences(self, state_pairs: List, rewards: List) -> None:
"""
Form experiences from a sequence of concepts and corresponding rewards.
state_pairs - a list of tuples containing two consecutive states
reward - a list of reward.
Gamma is 1.
Return
X - a list of embeddings of current concept, next concept, positive examples, negative examples
y - argmax Q value.
"""
for th, consecutive_states in enumerate(state_pairs):
e, e_next = consecutive_states
self.experiences.append(
(e, e_next, max(rewards[th:]))) # given e, e_next, Q val is the max Q value reachable.
def learn_from_replay_memory(self) -> None:
"""
Learning by replaying memory
@return:
"""
current_state_batch, next_state_batch, q_values = self.experiences.retrieve()
current_state_batch = torch.cat(current_state_batch, dim=0)
next_state_batch = torch.cat(next_state_batch, dim=0)
q_values = torch.Tensor(q_values)
try:
assert current_state_batch.shape[1] == next_state_batch.shape[1] == self.emb_pos.shape[1] == \
self.emb_neg.shape[1]
except AssertionError as e:
print(current_state_batch.shape)
print(next_state_batch.shape)
print(self.emb_pos.shape)
print(self.emb_neg.shape)
print('Wrong format.')
print(e)
raise
assert current_state_batch.shape[2] == next_state_batch.shape[2] == self.emb_pos.shape[2] == self.emb_neg.shape[
2]
dataset = PrepareBatchOfTraining(current_state_batch=current_state_batch,
next_state_batch=next_state_batch,
p=self.emb_pos, n=self.emb_neg, q=q_values)
num_experience = len(dataset)
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=self.batch_size, shuffle=True,
num_workers=self.num_workers)
print(f'Number of experiences:{num_experience}')
print('DQL agent is learning via experience replay')
self.heuristic_func.net.train()
for m in range(self.num_epochs_per_replay):
total_loss = 0
for X, y in data_loader:
self.optimizer.zero_grad() # zero the gradient buffers
# forward
predicted_q = self.heuristic_func.net.forward(X)
# loss
loss = self.heuristic_func.net.loss(predicted_q, y)
total_loss += loss.item()
# compute the derivative of the loss w.r.t. the parameters using backpropagation
loss.backward()
# clip gradients if gradients are killed. =>torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)
self.optimizer.step()
        self.heuristic_func.net.eval()
def sequence_of_actions(self, root: AbstractNode) -> Tuple[
List[Tuple[AbstractNode, AbstractNode]], List[SupportsFloat]]:
"""
Perform self.num_of_sequential_actions number of actions
(1) Make a sequence of **self.num_of_sequential_actions** actions
(1.1) Get next states in a generator and convert them to list
        (1.2) Exit if there is no next state. @TODO Is it possible to have 0 next states? Nothing should be in the set of refinements, shouldn't it?, i.e. [Nothing]
(1.3) Find next state.
(1.4) Exit, if next state is **Nothing**
(1.5) Compute reward.
(1.6) Update current state.
(2) Return path_of_concepts, rewards
"""
assert isinstance(root, AbstractNode)
current_state = root
path_of_concepts = []
rewards = []
# (1)
for _ in range(self.num_of_sequential_actions):
# (1.1)
next_states = list(self.apply_rho(current_state))
# (1.2)
if len(next_states) == 0: # DEAD END
assert (len(current_state) + 3) <= self.max_child_length
break
# (1.3)
next_state = self.exploration_exploitation_tradeoff(current_state, next_states)
# (1.3)
if next_state.concept.str == 'Nothing': # Dead END
break
# (1.4)
path_of_concepts.append((current_state, next_state))
# (1.5)
rewards.append(self.reward_func.calculate(current_state, next_state))
# (1.6)
current_state = next_state
# (2)
return path_of_concepts, rewards
def update_search(self, concepts, predicted_Q_values):
"""
@param concepts:
@param predicted_Q_values:
@return:
"""
# simple loop.
for child_node, pred_Q in zip(concepts, predicted_Q_values):
child_node.heuristic = pred_Q
self.search_tree.quality_func.apply(child_node)
if child_node.quality > 0: # > too weak, ignore.
self.search_tree.add(child_node)
if child_node.quality == 1:
return child_node
def apply_rho(self, node: AbstractNode) -> Generator:
"""
Refine an OWL Class expression \\|= Observing next possible states
Computation O(N).
1. Generate concepts by refining a node
1.1. Compute allowed length of refinements
1.2. Convert concepts if concepts do not belong to self.concepts_to_ignore
Note that i.str not in self.concepts_to_ignore => O(1) if a set is being used.
3. Return Generator
"""
assert isinstance(node, AbstractNode)
# 1.
# (1.1)
length = len(node) + 3 if len(node) + 3 <= self.max_child_length else self.max_child_length
# (1.2)
for i in self.operator.refine(node, maxlength=length): # O(N)
if i.str not in self.concepts_to_ignore: # O(1)
yield self.operator.get_node(i, parent_node=node) # O(1)
def assign_embeddings(self, node: AbstractNode) -> None:
assert isinstance(node, AbstractNode)
# (1) Detect mode
if self.representation_mode == 'averaging':
# (2) if input node has not seen before, assign embeddings.
if node.embeddings is None:
str_idx = [get_full_iri(i).replace('\n', '') for i in node.concept.instances]
if len(str_idx) == 0:
emb = torch.zeros(self.sample_size, self.instance_embeddings.shape[1])
else:
emb = torch.tensor(self.instance_embeddings.loc[str_idx].values, dtype=torch.float32)
emb = torch.mean(emb, dim=0)
emb = emb.view(1, self.sample_size, self.instance_embeddings.shape[1])
node.embeddings = emb
else:
""" Embeddings already assigned."""
try:
assert node.embeddings.shape == (1, self.sample_size, self.instance_embeddings.shape[1])
except AssertionError as e:
print(e)
print(node)
print(node.embeddings.shape)
print((1, self.sample_size, self.instance_embeddings.shape[1]))
raise
elif self.representation_mode == 'sampling':
if node.embeddings is None:
str_idx = [get_full_iri(i).replace('\n', '') for i in node.concept.instances]
if len(str_idx) >= self.sample_size:
sampled_str_idx = random.sample(str_idx, self.sample_size)
emb = torch.tensor(self.instance_embeddings.loc[sampled_str_idx].values, dtype=torch.float32)
else:
num_rows_to_fill = self.sample_size - len(str_idx)
emb = torch.tensor(self.instance_embeddings.loc[str_idx].values, dtype=torch.float32)
emb = torch.cat((torch.zeros(num_rows_to_fill, self.instance_embeddings.shape[1]), emb))
emb = emb.view(1, self.sample_size, self.instance_embeddings.shape[1])
node.embeddings = emb
else:
""" Embeddings already assigned."""
try:
assert node.embeddings.shape == (1, self.sample_size, self.instance_embeddings.shape[1])
except AssertionError:
print(node)
print(self.sample_size)
print(node.embeddings.shape)
print((1, self.sample_size, self.instance_embeddings.shape[1]))
raise ValueError
else:
raise ValueError
# @todo remove this testing in experiments.
if torch.isnan(node.embeddings).any() or torch.isinf(node.embeddings).any():
# No individual contained in the input concept.
# Sanity checking.
raise ValueError
def save_weights(self):
"""
Save pytorch weights.
@return:
"""
# Save model.
torch.save(self.heuristic_func.net.state_dict(),
self.storage_path + '/{0}.pth'.format(self.heuristic_func.name))
def rl_learning_loop(self, pos_uri: Set[str], neg_uri: Set[str]) -> List[float]:
"""
RL agent learning loop over learning problem defined
@param pos_uri: A set of URIs indicating E^+
@param neg_uri: A set of URIs indicating E^-
Computation
1. Initialize training
2. Learning loop: Stopping criteria
***self.num_episode** OR ***self.epsilon < self.epsilon_min***
2.1. Perform sequence of actions
2.2. Decrease exploration rate
2.3. Form experiences
2.4. Experience Replay
2.5. Return sum of actions
@return: List of sum of rewards per episode.
"""
# (1)
self.init_training(pos_uri=pos_uri, neg_uri=neg_uri)
root = self.operator.get_node(self.start_class, root=True)
# (2) Assign embeddings of root/first state.
self.assign_embeddings(root)
sum_of_rewards_per_actions = []
log_every_n_episodes = int(self.num_episode * .1) + 1
# (2)
for th in range(self.num_episode):
# (2.1)
sequence_of_states, rewards = self.sequence_of_actions(root)
if th % log_every_n_episodes == 0:
self.logger.info(
'{0}.th iter. SumOfRewards: {1:.2f}\tEpsilon:{2:.2f}\t|ReplayMem.|:{3}'.format(th, sum(rewards),
self.epsilon, len(
self.experiences)))
# (2.2)
self.epsilon -= self.epsilon_decay
if self.epsilon < self.epsilon_min:
break
# (2.3)
self.form_experiences(sequence_of_states, rewards)
# (2.4)
if th % self.num_epochs_per_replay == 0 and len(self.experiences) > 1:
self.learn_from_replay_memory()
sum_of_rewards_per_actions.append(sum(rewards))
return sum_of_rewards_per_actions
def exploration_exploitation_tradeoff(self, current_state: AbstractNode,
next_states: List[AbstractNode]) -> AbstractNode:
"""
Exploration vs Exploitation tradeoff at finding next state.
(1) Exploration
(2) Exploitation
"""
if np.random.random() < self.epsilon:
next_state = random.choice(next_states)
self.assign_embeddings(next_state)
else:
next_state = self.exploitation(current_state, next_states)
return next_state
def exploitation(self, current_state: AbstractNode, next_states: List[AbstractNode]) -> AbstractNode:
"""
Find next node that is assigned with highest predicted Q value.
(1) Predict Q values : predictions.shape => torch.Size([n, 1]) where n = len(next_states)
(2) Find the index of max value in predictions
(3) Use the index to obtain next state.
(4) Return next state.
"""
predictions: torch.Tensor = self.predict_Q(current_state, next_states)
argmax_id = int(torch.argmax(predictions))
next_state = next_states[argmax_id]
"""
# Sanity checking
print('#'*10)
for s, q in zip(next_states, predictions):
print(s, q)
print('#'*10)
print(next_state,f'\t {torch.max(predictions)}')
"""
return next_state
def predict_Q(self, current_state: AbstractNode, next_states: List[AbstractNode]) -> torch.Tensor:
"""
Predict promise of next states given current state.
@param current_state:
@param next_states:
@return: predicted Q values.
"""
self.assign_embeddings(current_state)
assert len(next_states) > 0
with torch.no_grad():
self.heuristic_func.net.eval()
            # create a batch of next-state embeddings.
next_state_batch = []
for _ in next_states:
self.assign_embeddings(_)
next_state_batch.append(_.embeddings)
next_state_batch = torch.cat(next_state_batch, dim=0)
ds = PrepareBatchOfPrediction(current_state.embeddings,
next_state_batch,
self.emb_pos,
self.emb_neg)
predictions = self.heuristic_func.net.forward(ds.get_all())
return predictions
def train(self, dataset: Iterable[Tuple[str, Set, Set]], relearn_ratio: int = 2):
"""
Train RL agent on learning problems with relearn_ratio.
@param dataset: An iterable containing training data. Each item corresponds to a tuple of string representation
        of target concept, a set of positive examples in the form of URIs and a set of negative examples in the form of
URIs, respectively.
@param relearn_ratio: An integer indicating the number of times dataset is iterated.
# @TODO determine Big-O
Computation
1. Dataset and relearn_ratio loops: Learn each problem relearn_ratio times,
2. Learning loop
3. Take post process action that implemented by subclass.
@return: self
"""
        # We need a better way of logging,
self.logger.info('Training starts.')
print(f'Training starts.\nNumber of learning problem:{len(dataset)},\t Relearn ratio:{relearn_ratio}')
counter = 1
# 1.
for _ in range(relearn_ratio):
for (alc_concept_str, positives, negatives) in dataset:
self.logger.info(
'Goal Concept:{0}\tE^+:[{1}] \t E^-:[{2}]'.format(alc_concept_str,
len(positives), len(negatives)))
# 2.
print(f'RL training on {counter}.th learning problem starts')
sum_of_rewards_per_actions = self.rl_learning_loop(pos_uri=positives, neg_uri=negatives)
                print(f'Sum of Rewards in first 3 trajectories:{sum_of_rewards_per_actions[:3]}')
                print(f'Sum of Rewards in last 3 trajectories:{sum_of_rewards_per_actions[-3:]}')
self.seen_examples.setdefault(counter, dict()).update(
{'Concept': alc_concept_str, 'Positives': list(positives), 'Negatives': list(negatives)})
counter += 1
if counter % 100 == 0:
self.save_weights()
# 3.
return self.terminate_training()
|
StarcoderdataPython
|
3289204
|
glossary = {
'integer': 'is colloquially defined as a number that can be written without a fractional component.\n',
'iterate': 'is the repetition of a process in order to generate a sequence of outcomes.\n',
'indentation': 'is an empty space at the beginning of a line that groups particular blocks of code.\n',
    'concatenate': 'is the operation of joining character strings end-to-end.\n',
'boolean': 'is a logical data type that can have only the values True or False.\n',
}
print(f"Integer: {glossary.get('integer')}\n")
print(f"Iterate: {glossary.get('iterate')}\n")
print(f"Indentation: {glossary.get('indentation')}\n")
print(f"Concatinate: {glossary.get('concatinate')}\n")
print(f"Boolean: {glossary.get('boolean')}\n")
|
StarcoderdataPython
|
3368731
|
from django.db import models
from import_common.core import matchuj_wydawce
from .base import BasePBNMongoDBModel
from bpp.models import LinkDoPBNMixin, const
class PublisherManager(models.Manager):
def official(self):
return self.exclude(mniswId=None)
class Publisher(LinkDoPBNMixin, BasePBNMongoDBModel):
objects = PublisherManager()
url_do_pbn = const.LINK_PBN_DO_WYDAWCY
atrybut_dla_url_do_pbn = "pk"
class Meta:
verbose_name = "Wydawca w PBN API"
verbose_name_plural = "Wydawcy w PBN API"
pull_up_on_save = ["publisherName", "mniswId"]
publisherName = models.TextField(null=True, blank=True, db_index=True)
mniswId = models.IntegerField(null=True, blank=True, db_index=True)
def __str__(self):
return f"{self.publisherName}, MNISW ID: {self.mniswId or '-'}"
@property
def points(self):
return self.current_version["object"]["points"]
def rekord_w_bpp(self):
from bpp.models import Wydawca
try:
return Wydawca.objects.get(pbn_uid_id=self.pk)
except Wydawca.DoesNotExist:
pass
def matchuj_wydawce(self):
return matchuj_wydawce(self.publisherName, self.pk)
|
StarcoderdataPython
|
159328
|
import re
def sort_urls(url_list,reverse=True):
return sorted(url_list, key=lambda k: k['bandwidth'], reverse=reverse)
def name_checker(name):
name = name.replace("'", "")
name = re.findall(r"([\w\d-]+)", name)
return ' '.join([x for x in name])
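# Hedged usage sketch (added; not part of the original module). The url
# dictionaries are illustrative; sort_urls only requires a 'bandwidth' key.
if __name__ == "__main__":
    urls = [{"url": "low.m3u8", "bandwidth": 800},
            {"url": "high.m3u8", "bandwidth": 2400}]
    print(sort_urls(urls)[0]["url"])           # high.m3u8 (highest bandwidth first)
    print(name_checker("Some: show's name!"))  # Some shows name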
|
StarcoderdataPython
|
90950
|
<filename>fig/falcon/__init__.py
from .api import FalconAPI
from .models import Event
from .stream import StreamManagementThread
__all__ = ['Event', 'FalconAPI', 'StreamManagementThread']
|
StarcoderdataPython
|
1604779
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 29 15:33:14 2018
@author: jonatha.costa
"""
import requests
from bs4 import BeautifulSoup
import pandas as pd
import datetime
import re
import numpy as np
from readability import Document
import time
def get_date(k):
soup = BeautifulSoup(k,'lxml')
date = soup.findAll("span",{'class':'date-display-single'})[0]
date = date.text
try:
date = datetime.datetime.strptime(date, "%d/%m/%Y").strftime('%Y-%m-%d')
except ValueError:
date = datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S").strftime('%Y-%m-%d')
return(date)
def get_manchete(k):
soup = BeautifulSoup(k, 'lxml')
#manchete = soup.findAll('h1',{'class':'content-head__title'})
manchete = soup.findAll('h1',{'property':'na:headline'})
try:
manchete_ok = manchete[0].text
except IndexError:
page_content = Document(k)
manchete_ok = page_content.title()
return(manchete_ok)
def boilerpipe_api_article_extract(k):
soup = BeautifulSoup(k, 'lxml')
text = soup.find_all('p')
texto = ""
for news in range(len(text)):
#print('concatenate part '+ str(news) + ' of ' + str(len(text)))
aux = text[news].text
texto = texto + aux
return(texto)
url1_base = 'https://www.cartacapital.com.br/@@search?advanced_search=False&b_start:int='
url2_base = '&created.query:date:list:record=1970/01/02%2000%3A00%3A00%20GMT%2B0&created.range:record=min&portal_type:list=ExternalBlogEntry&portal_type:list=Person&portal_type:list=collective.nitf.content&sort_on=&pt_toggle=%23&SearchableText=a'
page = 0
df_links = pd.DataFrame(columns = ["links_brutos","html"])
url_extract = url1_base + str(page) + url2_base
r = requests.get(url_extract)
while(r.status_code == 200):
print("get page:" + str(page))
url_extract = url1_base + str(page) + url2_base
r = requests.get(url_extract)
soup = BeautifulSoup(r.content, 'lxml')
html = soup
teste = soup.findAll('a')
time.sleep(1)
for i in range(len(teste)-2):
if('https://www.cartacapital.com.br' in teste[i].attrs['href'] and '/@@search?' not in teste[i].attrs['href']):
df_links = df_links.append({'links_brutos': teste[i].attrs['href'],
'html': html
},ignore_index=True)
page = page + 1
df_links = df_links.drop_duplicates()
df_links = df_links.reset_index(drop=True)
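# Hedged sketch (added; kept as comments because it needs live network access and
# assumes the article pages still contain the tags the helper functions above look for).
#
#   if not df_links.empty:
#       first_url = df_links.links_brutos.iloc[0]
#       page = requests.get(first_url).content
#       print(get_date(page), get_manchete(page))
#       print(boilerpipe_api_article_extract(page)[:200])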
|
StarcoderdataPython
|
73686
|
<reponame>suamin/nemex<gh_stars>1-10
"""
Nemex module.
Classes:
- Nemex
"""
import time
from .data import EntitiesDictionary
from .utils import *
from .similarities import Verify
from .faerie import Faerie
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
class Nemex:
"""Nemex class.
The Nemex class performs approximate 'Named Entity Matching and Extraction' based on the 'Faerie' algorithm.
Parameters
----------
list_or_file_entities : {list, str}
List or file with entities.
char : bool
If true, performs character-based similarity instead of token-based.
q : int
Size of q-grams.
special_char : str
Special character for substitution of space character.
unique : bool
If true, preserves order with uniqueness.
lower : bool
If true, converts document to lower case.
similarity : str
Similarity method.
t : int
Similarity threshold.
pruner : str
Pruning method.
verify : bool
If true, verify candidates.
"""
def __init__(self,
list_or_file_entities,
char: bool = Default.CHAR,
q: int = Default.TOKEN_THRESH,
special_char: str = Default.SPECIAL_CHAR,
unique: bool = Default.UNIQUE,
lower: bool = Default.LOWER,
similarity: str = Default.SIMILARITY,
t: int = Default.SIM_THRESH_CHAR,
pruner: str = Default.PRUNER,
verify: bool = Default.VERIFY
) -> None:
# character-level
if char:
if similarity not in Sim.CHAR_BASED:
raise ValueError("Change similarity method to 'edit_dist' or 'edit_sim' for character level.")
if similarity == Sim.EDIT_DIST:
t = int(t)
if not t >= 1:
raise ValueError("Edit distance threshold must be >= 1")
else:
if not (0. < t <= 1.0):
raise ValueError("Similarity score should be in (0, 1]")
q = int(q)
if not q >= 1:
raise ValueError("q-gram must be at least 1")
# token-level
else:
if similarity in Sim.CHAR_BASED:
raise ValueError("Change similarity method to 'cosine', 'dice' or 'jaccard' for token level.")
if not (0. < t <= 1.0):
raise ValueError("Similarity score should be in (0, 1]")
# tokenizer
self.tokenizer = Tokenizer(char, q, special_char, unique, lower)
self.char = char
# log start
logger.info("Building entities dictionary ...")
T = time.time()
# create entity dictionary
if isinstance(list_or_file_entities, list):
self.E = EntitiesDictionary.from_list(list_or_file_entities, self.tokenizer.tokenize)
elif isinstance(list_or_file_entities, str):
# else it is file of tsv id\tent lines or just text of ent lines
self.E = EntitiesDictionary.from_tsv_file(list_or_file_entities, self.tokenizer.tokenize)
else:
logger.error("Bad input type.")
logger.error("Expected `list` or `str`, but got ", type(list_or_file_entities))
exit(0)
# caching
self.cache_ent_repr = dict()
# log end
T = time.time() - T
logger.info("Building dictionary took {} seconds.".format(int(T)))
# setup model
self.faerie = Faerie(self.E, similarity=similarity, t=t, q=q, pruner=pruner)
self.verify = verify
return
def __call__(self, document: str, valid_only: bool = True) -> dict:
"""Executes the Nemex algorithm.
Parameters
----------
document : str
Text document.
valid_only : bool
If true, return only as valid verified substrings.
Returns
-------
Dictionary with document and match list.
"""
# check doc type
assert isinstance(document, str), "Expected a string as document."
# tokenize
doc_tokens = self.tokenizer.tokenize(document)
# char-based
if self.char:
doc_tokens_str = qgrams_to_char(doc_tokens).replace(self.tokenizer.special_char, " ")
# token-based
else:
doc_tokens_str = " ".join(doc_tokens)
# init spans
spans = tokens_to_whitespace_char_spans(doc_tokens)
# init output
output = {"document": doc_tokens_str, "matches": list()}
# returns pair of <entity index, (start, end) positions in doc_tokens>
for e, (i, j) in self.faerie(doc_tokens):
match_tokens = doc_tokens[i:j+1]
match_span = spans[i:j+1]
if len(match_span) == 1:
start, end = match_span[0]
else:
start, end = match_span[0][0], match_span[-1][1]
# char-based
if self.char:
q = self.tokenizer.q
start, end = start - (i * q), end - (j * q)
match = doc_tokens_str[start:end]
if e not in self.cache_ent_repr:
entity = qgrams_to_char(self.E[e].tokens).replace(self.tokenizer.special_char, " ")
self.cache_ent_repr[e] = entity
else:
entity = self.cache_ent_repr[e]
output["matches"].append({
"entity": [entity, self.E[e].id],
"span": [start, end],
"match": match,
"score": None,
"valid": None
})
# verify
if self.verify:
valid, score = Verify.check(match, entity, self.faerie.similarity, self.faerie.t)
output["matches"][-1]["score"] = score
output["matches"][-1]["valid"] = valid
# return only valid matches
if valid_only and not valid:
del output["matches"][-1]
# token-based
else:
# end = spans[j][-1]
match = doc_tokens_str[start:end]
if e not in self.cache_ent_repr:
entity = " ".join(self.E[e].tokens)
self.cache_ent_repr[e] = entity
else:
entity = self.cache_ent_repr[e]
output["matches"].append({
"entity": [entity, self.E[e].id],
"span": [start, end],
"match": match,
"score": None,
"valid": None
})
# verify
if self.verify:
valid, score = Verify.check(
match_tokens, self.E[e].tokens, self.faerie.similarity, self.faerie.t
)
output["matches"][-1]["score"] = score
output["matches"][-1]["valid"] = valid
# return only valid matches
if valid_only and not valid:
del output["matches"][-1]
return output
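# Hedged usage sketch (added; shown as comments because this module uses relative
# imports and must be imported as part of the nemex package — the top-level
# `from nemex import Nemex` is an assumption about how the package exposes the class).
# The entity list, document and thresholds are illustrative only.
#
#   from nemex import Nemex
#   nemex = Nemex(["paris", "new york"], char=True, q=2, similarity="edit_dist", t=2)
#   result = nemex("flights from pariss to newyork")
#   for m in result["matches"]:
#       print(m["entity"], m["span"], m["match"], m["valid"])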
|
StarcoderdataPython
|
4807833
|
#!/usr/bin/env python
print "hello World, Welcome to python"
|
StarcoderdataPython
|
1771626
|
"""ExpNushellxLpt.py
Definition for namedtuple representation of a set of *.lpt file data
from running NuShellX on Magnus, Heiko, or normal-ordered interactions
"""
from __future__ import print_function, division, unicode_literals
from collections import namedtuple
# noinspection PyClassHasNoInit
class ExpNushellxLpt(namedtuple('ExpNushellxLpt', ['Z', 'int'])):
"""Exp definition for *.nushellx_lpt files
Z: proton number
int: interaction type (e.g. 'usdb', 'sd-shell...', 'fit-gen...')
"""
__slots__ = ()
def __str__(self):
return str(tuple(self._asdict().values())).replace(', None', '')
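# Hedged usage sketch (added; not part of the original module). The Z and
# interaction values are illustrative.
if __name__ == "__main__":
    lpt = ExpNushellxLpt(Z=8, int='usdb')
    print(lpt.Z, lpt.int)
    print(lpt)   # -> (8, 'usdb')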
|
StarcoderdataPython
|
129978
|
<gh_stars>0
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utils for publishers
"""
import hashlib
import hmac
from oslo.config import cfg
import six
from ceilometer import utils
METER_PUBLISH_OPTS = [
cfg.StrOpt('metering_secret',
secret=True,
default='change this or be hacked',
help='Secret value for signing metering messages.',
deprecated_opts=[cfg.DeprecatedOpt("metering_secret",
"DEFAULT"),
cfg.DeprecatedOpt("metering_secret",
"publisher_rpc")]
),
]
def register_opts(config):
"""Register the options for publishing metering messages."""
config.register_opts(METER_PUBLISH_OPTS, group="publisher")
register_opts(cfg.CONF)
def compute_signature(message, secret):
"""Return the signature for a message dictionary."""
digest_maker = hmac.new(secret, '', hashlib.sha256)
for name, value in utils.recursive_keypairs(message):
if name == 'message_signature':
# Skip any existing signature value, which would not have
# been part of the original message.
continue
digest_maker.update(name)
digest_maker.update(six.text_type(value).encode('utf-8'))
return digest_maker.hexdigest()
def besteffort_compare_digest(first, second):
"""Returns True if both string inputs are equal, otherwise False.
This function should take a constant amount of time regardless of
how many characters in the strings match.
"""
# NOTE(sileht): compare_digest method protected for timing-attacks
# exists since python >= 2.7.7 and python >= 3.3
# this a bit less-secure python fallback version
# taken from https://github.com/openstack/python-keystoneclient/blob/
# master/keystoneclient/middleware/memcache_crypt.py#L88
if len(first) != len(second):
return False
result = 0
if six.PY3 and isinstance(first, bytes) and isinstance(second, bytes):
for x, y in zip(first, second):
result |= x ^ y
else:
for x, y in zip(first, second):
result |= ord(x) ^ ord(y)
return result == 0
if hasattr(hmac, 'compare_digest'):
compare_digest = hmac.compare_digest
else:
compare_digest = besteffort_compare_digest
def verify_signature(message, secret):
"""Check the signature in the message.
Message is verified against the value computed from the rest of the
contents.
"""
old_sig = message.get('message_signature', '')
new_sig = compute_signature(message, secret)
if isinstance(old_sig, six.text_type):
try:
old_sig = old_sig.encode('ascii')
except UnicodeDecodeError:
return False
return compare_digest(new_sig, old_sig)
def meter_message_from_counter(sample, secret):
"""Make a metering message ready to be published or stored.
Returns a dictionary containing a metering message
for a notification message and a Sample instance.
"""
msg = {'source': sample.source,
'counter_name': sample.name,
'counter_type': sample.type,
'counter_unit': sample.unit,
'counter_volume': sample.volume,
'user_id': sample.user_id,
'project_id': sample.project_id,
'resource_id': sample.resource_id,
'timestamp': sample.timestamp,
'resource_metadata': sample.resource_metadata,
'message_id': sample.id,
}
msg['message_signature'] = compute_signature(msg, secret)
return msg
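# Hedged usage sketch (added; kept as comments because this module targets the
# Python 2 era ceilometer code base and depends on ceilometer.utils and oslo.config
# at import time). The message fields and the secret are illustrative.
#
#   msg = {'counter_name': 'cpu', 'counter_volume': 1.0, 'resource_id': 'vm-1'}
#   msg['message_signature'] = compute_signature(msg, 'change this or be hacked')
#   assert verify_signature(msg, 'change this or be hacked')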
|
StarcoderdataPython
|
159819
|
import os
import json
path = "P2S2/"
example = {}
example["labels"] = ["Background","Vegetation","Organ","Don't know"]
example["models"] = []
dirs = [item for item in os.listdir(path) if os.path.isdir(os.path.join(path,item))]
print(dirs)
for dir_ in dirs:
if dir_ == "images":
example["imageURLs"] = ["data/images/"+item for item in os.listdir(os.path.join(path,"images")) if item.endswith(".png")]
elif dir_ =="annotations":
example["annotationURLs"] = ["data/annotations/"+item for item in os.listdir(os.path.join(path,"annotations")) if item.endswith(".png")]
else:
example["models"].append(dir_)
with open(path+'example.json', 'w') as fp:
json.dump(example, fp)
|
StarcoderdataPython
|
1603061
|
"""
Types for storage nodes.
"""
import os
import re
class Cache:
"""
Data structure to handle cache operations
Operations:
get(filename: str) -> str | None
set(filename: str, content: str) -> None
"""
def __init__(self, cache_folder='cache'):
self.path = f'./{cache_folder}'
if not os.path.exists(self.path):
os.makedirs(self.path)
def get(self, filename: str) -> str:
filename = re.sub('https?://', '', filename)
filename = re.sub(r'\?|/', '_', filename)
try:
with open(os.path.join(self.path, filename), 'r') as fd:
return fd.read()
except FileNotFoundError:
return None
def set(self, filename: str, content: str):
filename = re.sub('https?://', '', filename)
filename = re.sub(r'\?|/', '_', filename)
with open(os.path.join(self.path, filename), 'w') as fd:
fd.write(content)
def __iter__(self):
for file in os.listdir(self.path):
with open(f'{self.path}/{file}') as fd:
content = fd.read()
yield (file, content)
if __name__ == '__main__':
cache = Cache()
for file, content in cache:
print(file, content)
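    # Hedged usage sketch (added): set/get round-trip with an illustrative URL,
    # showing how the key is sanitized into a cache filename.
    cache.set('https://example.com/api?q=1', '{"ok": true}')
    print(cache.get('https://example.com/api?q=1'))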
|
StarcoderdataPython
|
151286
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import contextlib
import re
import sys
# NOTE: this module doesn't import sublime module so we can mock view/region etc in tests
LIST_ENTRY_BEGIN_RE = re.compile(
r"""^(
\s+[*] |
\s*[-+] |
\s*[0-9]+[.] |
\s[a-zA-Z][.]
)\s+
(?:
(?P<tick_box>\[[- xX]\])
\s
)?
""",
re.VERBOSE
)
HEADLINE_RE = re.compile(
'^([*]+) \s+' # STARS group 1
'(?: ([A-Za-z0-9]+)\s+ )?' # KEYWORD group 2
'(?: \[[#]([a-zA-Z])\]\s+)?' # PRIORITY group 3
'(.*?)' # TITLE -- match in nongreedy fashion group 4
'\s* (:(?: [a-zA-Z0-9_@#]+ :)+)? \s*$', # TAGS group 5
re.VERBOSE
)
CONTROL_LINE_RE = re.compile(
"^\#\+" # prefix
"([A-Z_]+) :" # key
"\s* (.*)", # value
re.VERBOSE
)
BEGIN_SRC_RE = re.compile(
r"^\s*\#\+BEGIN_SRC\b.*$"
)
END_SRC_RE = re.compile(
r"^\s*\#\+END_SRC\b.*$"
)
BEGIN_EXAMPLE_RE = re.compile(
r"^\s*\#\+BEGIN_EXAMPLE\b.*$"
)
END_EXAMPLE_RE = re.compile(
r"^\s*\#\+END_EXAMPLE\b.*$"
)
COLON_LINE_EXAMPLE_RE = re.compile(
r"^\s*:.*$"
)
KEYWORD_SET = frozenset(["TODO", "DONE"])
def is_point_within_region(point, region):
return region.a <= point < region.b
def line_is_list_entry_begin(line_text):
return LIST_ENTRY_BEGIN_RE.match(line_text)
def line_is_headline(line_text):
return HEADLINE_RE.match(line_text)
def iter_tree_depth_first(node):
for child in node.children:
for n in iter_tree_depth_first(child):
yield n
yield node
def find_child_containing_point(node, point):
if not is_point_within_region(point, node.region):
return None
while node.children:
for child in node.children:
if is_point_within_region(point, child.region):
node = child
break
else:
return node
return node
def sibling(node, offset, sibling_type_filter=None):
if node.parent is None:
return None
siblings = node.parent.children
if sibling_type_filter:
siblings = [s for s in siblings if isinstance(s, sibling_type_filter)]
idx = siblings.index(node)
if idx == -1:
raise AssertionError("Cannot find node in the list of its parent children")
if 0 <= idx + offset < len(siblings):
return siblings[idx + offset]
return None
def next_sibling(node, sibling_type_filter=None):
return sibling(node, 1, sibling_type_filter)
def prev_sibling(node, sibling_type_filter=None):
return sibling(node, -1, sibling_type_filter)
def view_full_lines(view, region):
# NOTE: line ending might be either '\r\n' or '\n'
# TODO: test this function
line_region_list = view.lines(region)
for i in range(len(line_region_list) - 1):
line_region_list[i].b = line_region_list[i+1].a
if line_region_list:
line_region_list[-1].b = view.size()
return line_region_list
def parse_org_document_new(view, region):
builder = OrgTreeBuilder(view)
parser_input = ParserInput(view, region)
parse_global_scope(parser_input, builder)
return builder.finish()
class OrgViewNode(object):
def __init__(self, view, parent):
self.children = []
self.parent = parent
if self.parent:
self.parent.children.append(self)
self.region = None
self.view = view
def text(self):
return self.view.substr(self.region)
def __repr__(self):
text = _node_text(self)
if len(text) > 55:
text = "{} ... {}".format(text[:25], text[-25:])
attrs = self._debug_attrs()
if attrs != "":
attrs += ", "
return "{cls}({attrs}{str_repr})".format(cls=type(self).__name__, attrs=attrs, str_repr=repr(text))
def _debug_attrs(self):
return ""
def debug_print(self, indent=None, file=None):
if file is None:
file = sys.stdout
if indent is None:
indent = 0
indent_str = " " * indent
file.write(indent_str + repr(self) + "\n")
for c in self.children:
c.debug_print(indent+2)
if indent == 0:
file.flush()
class OrgRoot(OrgViewNode):
node_type = "root"
def __init__(self, view):
super(OrgRoot, self).__init__(view, None)
class OrgSection(OrgViewNode):
node_type = "section"
def __init__(self, view, parent, level):
super(OrgSection, self).__init__(view, parent)
self.level = level
def _debug_attrs(self):
return "level={}".format(self.level)
class OrgHeadline(OrgViewNode):
node_type = "headline"
def __init__(self, view, parent, level):
super(OrgHeadline, self).__init__(view, parent)
self.level = level
def _debug_attrs(self):
return "level={}".format(self.level)
class OrgSrcBlock(OrgViewNode):
node_type = "src_block"
def org_headline_get_text(headline: OrgHeadline):
line = headline.view.substr(headline.region)
m = HEADLINE_RE.match(line)
assert m is not None
keyword = m.group(2)
title_begin = m.start(4)
title_end = m.end(4)
if keyword is not None and keyword not in KEYWORD_SET:
title_begin = m.start(2)
return line[title_begin:title_end]
def org_headline_get_tag_list(headline: OrgHeadline):
line = headline.view.substr(headline.region)
m = HEADLINE_RE.match(line)
assert m is not None
tag_group = m.group(5)
if tag_group is not None:
return tag_group.strip(':').split(':')
return []
class OrgList(OrgViewNode):
node_type = "list"
def __init__(self, view, parent, indent):
super(OrgList, self).__init__(view, parent)
self.indent = indent
class OrgListEntry(OrgViewNode):
node_type = "list_entry"
def __init__(self, view, parent, indent, match):
super(OrgListEntry, self).__init__(view, parent)
self.indent = indent
self.tick_offset = None
if match.group("tick_box") is not None:
self.tick_offset = match.start("tick_box") + 1
class OrgControlLine(OrgViewNode):
node_type = "control_line"
def __init__(self, view, parent):
super(OrgControlLine, self).__init__(view, parent)
def org_list_entry_get_tick_position(node: OrgListEntry):
if node.region is None:
raise ValueError("OrgListEntry region is unknown")
elif node.tick_offset is None:
return None
return node.region.a + node.tick_offset
def org_control_line_get_key_value(control_line: OrgControlLine):
line = control_line.view.substr(control_line.region)
m = CONTROL_LINE_RE.match(line)
assert m is not None
return m.group(1), m.group(2)
class OrgTreeBuilder:
def __init__(self, view):
self._root = OrgRoot(view)
section = OrgSection(view, self._root, 0)
self._stack = [self._root, section]
self._context_stack = [2]
def top(self):
return self._stack[-1]
def pop(self):
self._stack.pop()
def push(self, node):
self._stack.append(node)
def finish(self):
self._stack = None
return self._root
@contextlib.contextmanager
def push_context(self):
curlen = len(self._stack)
self._context_stack.append(curlen)
yield
self._context_stack.pop()
if len(self._stack) > curlen:
del self._stack[curlen:]
def is_context_empty(self):
return len(self._stack) <= self._context_stack[-1]
class ParserInput:
def __init__(self, view, region):
self._full_line_region_list = view_full_lines(view, region)
self._idx = 0
self.view = view
def get_current_line_region(self):
if self._idx < len(self._full_line_region_list):
return self._full_line_region_list[self._idx]
else:
return None
def next_line(self):
self._idx += 1
def parse_global_scope(parser_input: ParserInput, builder: OrgTreeBuilder):
view = parser_input.view
while parser_input.get_current_line_region() is not None:
region = parser_input.get_current_line_region()
line = view.substr(region)
line = line.rstrip('\n')
m = HEADLINE_RE.match(line)
if m is not None:
headline_level = len(m.group(1))
assert headline_level > 0
while (
not isinstance(builder.top(), OrgSection)
or builder.top().level >= headline_level
):
builder.pop()
new_section = OrgSection(view, builder.top(), headline_level)
headline = OrgHeadline(view, new_section, headline_level)
builder.push(new_section)
_extend_region(headline, region)
parser_input.next_line()
continue
m = LIST_ENTRY_BEGIN_RE.match(line)
if m is not None:
with builder.push_context():
parse_list(parser_input, builder)
continue
m = BEGIN_SRC_RE.match(line)
if m is not None:
with builder.push_context():
parse_example_block(parser_input, builder, BEGIN_SRC_RE, END_SRC_RE)
continue
m = BEGIN_EXAMPLE_RE.match(line)
if m is not None:
with builder.push_context():
parse_example_block(parser_input, builder, BEGIN_EXAMPLE_RE, END_EXAMPLE_RE)
continue
        m = COLON_LINE_EXAMPLE_RE.match(line)
        if m is not None:
            with builder.push_context():
                parse_example_block(parser_input, builder, COLON_LINE_EXAMPLE_RE, None)
            continue
m = CONTROL_LINE_RE.match(line)
if m is not None:
control_line = OrgControlLine(view, builder.top())
_extend_region(control_line, region)
parser_input.next_line()
continue
_extend_region(builder.top(), region)
parser_input.next_line()
continue
def parse_list(parser_input: ParserInput, builder: OrgTreeBuilder):
view = parser_input.view
empty_lines = 0
while parser_input.get_current_line_region() is not None:
region = parser_input.get_current_line_region()
line = view.substr(region)
if line.startswith("*"):
break
line_is_empty = not bool(line.strip())
if line_is_empty:
empty_lines += 1
if empty_lines >= 2:
return
parser_input.next_line()
continue
else:
empty_lines = 0
indent = _calc_indent(line)
m = LIST_ENTRY_BEGIN_RE.match(line)
if m is not None:
while (
isinstance(builder.top(), OrgList) and builder.top().indent > indent
or isinstance(builder.top(), OrgListEntry) and builder.top().indent >= indent
):
builder.pop()
if (
not isinstance(builder.top(), OrgList)
or builder.top().indent < indent
):
builder.push(OrgList(view, builder.top(), indent))
builder.push(OrgListEntry(view, builder.top(), indent, m))
_extend_region(builder.top(), region)
parser_input.next_line()
continue
while (
not builder.is_context_empty()
and not (
isinstance(builder.top(), OrgListEntry)
and builder.top().indent < indent
)
):
builder.pop()
if builder.is_context_empty():
return
assert isinstance(builder.top(), OrgListEntry)
_extend_region(builder.top(), region)
parser_input.next_line()
def parse_example_block(parser_input: ParserInput, builder: OrgTreeBuilder, begin_re, end_re):
view = parser_input.view
region = parser_input.get_current_line_region()
if region is None:
return
line = view.substr(region)
m = begin_re.match(line)
if m is None:
return
src_block = OrgSrcBlock(view, builder.top())
builder.push(src_block)
_extend_region(src_block, region)
parser_input.next_line()
    while True:
        region = parser_input.get_current_line_region()
        if region is None:
            return
        line = view.substr(region)
        if end_re is None:
            # A colon-example block ends at the first non-matching line,
            # which must be left for the caller to re-parse.
            if begin_re.match(line) is None:
                break
            _extend_region(src_block, region)
            parser_input.next_line()
        else:
            _extend_region(src_block, region)
            parser_input.next_line()
            if end_re.match(line) is not None:
                break
builder.pop()
#
# Details
#
def _calc_indent(line):
indent = 0
for c in line:
if c == ' ':
indent += 1
else:
break
return indent
def _extend_region(node, region):
# we don't want to be dependent on region class so we'll derive region class from runtime
region_cls = type(region)
while node:
if node.region is None:
node.region = region
else:
new_region = region_cls(node.region.a, region.b)
node.region = new_region
node = node.parent
def _node_text(node):
return node.view.substr(node.region)
if __name__ == '__main__':
import unittest
import mock_sublime
class TestListParsing(unittest.TestCase):
def test_simple_list(self):
view = mock_sublime.View(
" - some list item\n"
" - another list item\n"
)
parser = OrgListParser(view)
for region in view.sp_iter_all_line_regions():
result = parser.try_push_line(region)
self.assertTrue(result)
result = parser.finish()
self.assertEqual(len(result.children), 1)
self.assertEqual(len(result.children[0].children), 2)
self.assertEqual(_node_text(result.children[0].children[0]), " - some list item")
self.assertEqual(_node_text(result.children[0].children[1]), " - another list item")
def test_simple_list_with_child(self):
view = mock_sublime.View(
" - parent 1\n"
" - child\n"
" - parent 2\n"
)
result = parse_org_document_new(view, mock_sublime.Region(0, view.size()))
self.assertEqual(len(result.children), 1)
self.assertEqual(len(result.children[0].children), 2)
self.assertEqual(_node_text(result.children[0].children[0]), " - parent 1\n - child")
self.assertEqual(len(result.children[0].children[0].children), 1)
self.assertEqual(_node_text(result.children[0].children[0].children[0]), " - child")
self.assertEqual(_node_text(result.children[0].children[1]), " - parent 2")
def test_list_with_text(self):
view = mock_sublime.View(
" - parent 1\n"
" 1111\n"
" * child 1\n"
" 2222\n"
" * child 2\n"
" 3333\n"
" * child 3\n"
" 4444\n"
" - parent 2\n"
" 5555\n"
)
parser = OrgListParser(view)
for region in view.sp_iter_all_line_regions():
result = parser.try_push_line(region)
self.assertTrue(result)
result = parser.finish()
self.assertEqual(len(result.children), 1)
self.assertEqual(len(result.children[0].children), 2)
parent1, parent2 = result.children[0].children
self.assertEqual(
_node_text(parent1),
" - parent 1\n"
" 1111\n"
" * child 1\n"
" 2222\n"
" * child 2\n"
" 3333\n"
" * child 3\n"
" 4444"
)
self.assertEqual(len(parent1.children), 3)
for num, child_lst in enumerate(parent1.children, 1):
self.assertEqual(len(child_lst.children), 1)
child_entry, = child_lst.children
self.assertEqual(_node_text(child_entry), " * child {}".format(num))
class GlobalScopeParsing(unittest.TestCase):
def test_headline_parsing(self):
view = mock_sublime.View(
"* This is org headline\n"
"** TODO headline 2\n"
"*** DONE headline 3\n"
"**** TODO [#b] headline 4\n"
"** UNDONE HEADLINE 5\n"
"** UNDONE [#a] HeAdLiNe 6\n"
"*** more headlines 7 :tag1:tag2:\n"
)
root = parse_org_document_new(view, mock_sublime.Region(0, view.size()))
headline_item_list = []
for item in iter_tree_depth_first(root):
if isinstance(item, OrgHeadline):
headline_item_list.append((
org_headline_get_text(item),
item.level,
org_headline_get_tag_list(item)
))
self.assertEqual(headline_item_list, [
("This is org headline", 1, []),
("headline 2", 2, []),
("headline 3", 3, []),
("headline 4", 4, []),
("UNDONE HEADLINE 5", 2, []),
("UNDONE [#a] HeAdLiNe 6", 2, []),
("more headlines 7", 3, ["tag1", "tag2"]),
])
def test_control_line_parsing(self):
view = mock_sublime.View(
"#+ARCHIVE: foo\n"
"#+BAR: QUX\n"
"#+GG: once upon a time...\n"
"#+BEGIN_SRC\n"
"#+END_SRC\n"
)
root = parse_org_document_new(view, mock_sublime.Region(0, view.size()))
all_control_key_value_list = []
for item in iter_tree_depth_first(root):
if isinstance(item, OrgControlLine):
all_control_key_value_list.append(org_control_line_get_key_value(item))
self.assertEqual(all_control_key_value_list, [
("ARCHIVE", "foo"),
("BAR", "QUX"),
("GG", "once upon a time..."),
])
unittest.main()
|
StarcoderdataPython
|
1799861
|
#!/usr/bin/python3
"""
Application loader.
Handles:
- flaskenv loading
- requirement updates for all plugins/macros and main application (if network is available)
- database upgrade
- loading WSGI server in production mode
"""
if __name__ == "__main__":
from sys import path
path.append('webapi')
# Load .flaskenv
from os.path import isfile
if isfile('.flaskenv'):
from libs.basics.system import load_export_file
load_export_file('.flaskenv')
# Pre-load config
from libs.config import Config
config = Config()
# Install python packages
from libs.basics.network import is_up
if is_up("8.8.8.8") or is_up("1.1.1.1"):
from sys import executable
from subprocess import check_call
from os import listdir
from os.path import isfile, join
check_call([executable, '-m', 'pip', 'install', '--upgrade', 'pip', 'wheel'])
check_call([executable, '-m', 'pip', 'install', '--upgrade', '-r', 'requirements.txt'])
blueprint_path = config.get('webapi', 'plugin_path')
for blueprint in listdir(blueprint_path):
requirements_path = join(blueprint_path, blueprint, 'requirements.txt')
if isfile(requirements_path):
check_call([executable, '-m', 'pip', 'install', '--upgrade', '-r', requirements_path])
macro_path = config.get('webapi', 'macro_path')
for macro in listdir(macro_path):
requirements_path = join(macro_path, macro, 'requirements.txt')
if isfile(requirements_path):
check_call([executable, '-m', 'pip', 'install', '--upgrade', '-r', requirements_path])
else:
print("Internet not reachable")
# Create application
from webapi import create_app
webapi = create_app()
# Start application
from os import getenv
if getenv('FLASK_ENV') == 'development':
webapi.run(
host=getenv('FLASK_RUN_HOST', '0.0.0.0'),
port=int(getenv('FLASK_RUN_PORT', '5000')),
            debug=getenv('FLASK_DEBUG', 'True').lower() in ('1', 'true', 'yes')
)
else:
from waitress import serve
serve(
webapi,
host=getenv('FLASK_RUN_HOST', '0.0.0.0'),
port=int(getenv('FLASK_RUN_PORT', '5000'))
)
|
StarcoderdataPython
|
1725547
|
import numpy as np
from datashader.composite import add, saturate, over, source
src = np.array([[0x00000000, 0x00ffffff, 0xffffffff],
[0x7dff0000, 0x7d00ff00, 0x7d0000ff],
[0xffff0000, 0xff000000, 0x3a3b3c3d]], dtype='uint32')
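# The uint32 pixels above pack RGBA little-endian, i.e. 0xAABBGGRR: for example
# 0xffff0000 is opaque blue and 0x7dff0000 is ~50%-alpha blue, matching the
# named constants below.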
clear = np.uint32(0)
clear_white = np.uint32(0x00ffffff)
white = np.uint32(0xffffffff)
blue = np.uint32(0xffff0000)
half_blue = np.uint32(0x7dff0000)
half_purple = np.uint32(0x7d7d007d)
def test_source():
o = src.copy()
o[0, :2] = clear
np.testing.assert_equal(source(src, clear), o)
o[0, :2] = clear_white
np.testing.assert_equal(source(src, clear_white), o)
o[0, :2] = half_blue
np.testing.assert_equal(source(src, half_blue), o)
def test_over():
o = src.copy()
o[0, 1] = 0
np.testing.assert_equal(over(src, clear), o)
np.testing.assert_equal(over(src, clear_white), o)
o = np.array([[0xffffffff, 0xffffffff, 0xffffffff],
[0xffff8282, 0xff82ff82, 0xff8282ff],
[0xffff0000, 0xff000000, 0xffd2d2d2]])
np.testing.assert_equal(over(src, white), o)
o = np.array([[0xffff0000, 0xffff0000, 0xffffffff],
[0xffff0000, 0xff827d00, 0xff82007d],
[0xffff0000, 0xff000000, 0xffd20d0d]])
np.testing.assert_equal(over(src, blue), o)
o = np.array([[0x7dff0000, 0x7dff0000, 0xffffffff],
[0xbcff0000, 0xbc56a800, 0xbc5600a8],
[0xffff0000, 0xff000000, 0x9ab51616]])
np.testing.assert_equal(over(src, half_blue), o)
o = np.array([[0x7d7d007d, 0x7d7d007d, 0xffffffff],
[0xbcd3002a, 0xbc2aa82a, 0xbc2a00d3],
[0xffff0000, 0xff000000, 0x9a641664]])
np.testing.assert_equal(over(src, half_purple), o)
def test_add():
o = src.copy()
o[0, 1] = 0
np.testing.assert_equal(add(src, clear), o)
np.testing.assert_equal(add(src, clear_white), o)
o = np.array([[0xffffffff, 0xffffffff, 0xffffffff],
[0xffffffff, 0xffffffff, 0xffffffff],
[0xffffffff, 0xffffffff, 0xffffffff]])
np.testing.assert_equal(add(src, white), o)
o = np.array([[0xffff0000, 0xffff0000, 0xffffffff],
[0xffff0000, 0xffff7d00, 0xffff007d],
[0xffff0000, 0xffff0000, 0xffff0d0d]])
np.testing.assert_equal(add(src, blue), o)
o = np.array([[0x7dff0000, 0x7dff0000, 0xffffffff],
[0xfaff0000, 0xfa7f7f00, 0xfa7f007f],
[0xffff0000, 0xff7d0000, 0xb7c01313]])
np.testing.assert_equal(add(src, half_blue), o)
o = np.array([[0x7d7d007d, 0x7d7d007d, 0xffffffff],
[0xfabe003e, 0xfa3e7f3e, 0xfa3e00be],
[0xffff003d, 0xff3d003d, 0xb7681368]])
np.testing.assert_equal(add(src, half_purple), o)
def test_saturate():
o = src.copy()
o[0, 1] = 0
np.testing.assert_equal(saturate(src, clear), o)
np.testing.assert_equal(saturate(src, clear_white), o)
o = np.full((3, 3), white, dtype='uint32')
np.testing.assert_equal(saturate(src, white), o)
o = np.full((3, 3), blue, dtype='uint32')
np.testing.assert_equal(saturate(src, blue), o)
o = np.array([[0x7dff0000, 0x7dff0000, 0xffff8282],
[0xfaff0000, 0xfa7f7f00, 0xfa7f007f],
[0xffff0000, 0xff7d0000, 0xb7c01313]])
np.testing.assert_equal(saturate(src, half_blue), o)
o = np.array([[0x7d7d007d, 0x7d7d007d, 0xffbf82bf],
[0xfabe003e, 0xfa3e7f3e, 0xfa3e00be],
[0xffbf003d, 0xff3d003d, 0xb7681368]])
np.testing.assert_equal(saturate(src, half_purple), o)
|
StarcoderdataPython
|
51224
|
import sys
import asyncio
import zmq
import zmq.asyncio
from zmq.auth import Authenticator
from zmq.auth.thread import _inherit_docstrings, ThreadAuthenticator, \
AuthenticationThread
# Copying code from zmq classes since there is no way to inject these dependencies
class MultiZapAuthenticator(Authenticator):
"""
`Authenticator` supports only one ZAP socket in a single process, this lets
you have multiple ZAP sockets
"""
count = 0
def __init__(self, context=None, encoding='utf-8', log=None):
MultiZapAuthenticator.count += 1
super().__init__(context=context, encoding=encoding, log=log)
def start(self):
"""Create and bind the ZAP socket"""
self.zap_socket = self.context.socket(zmq.REP)
self.zap_socket.linger = 1
zapLoc = 'inproc://zeromq.zap.{}'.format(MultiZapAuthenticator.count)
self.zap_socket.bind(zapLoc)
self.log.debug('Starting ZAP at {}'.format(zapLoc))
def stop(self):
"""Close the ZAP socket"""
if self.zap_socket:
self.log.debug(
'Stopping ZAP at {}'.format(self.zap_socket.LAST_ENDPOINT))
super().stop()
@_inherit_docstrings
class ThreadMultiZapAuthenticator(ThreadAuthenticator):
def start(self):
"""Start the authentication thread"""
# create a socket to communicate with auth thread.
self.pipe = self.context.socket(zmq.PAIR)
self.pipe.linger = 1
self.pipe.bind(self.pipe_endpoint)
authenticator = MultiZapAuthenticator(self.context, encoding=self.encoding,
log=self.log)
self.thread = AuthenticationThread(self.context, self.pipe_endpoint,
encoding=self.encoding, log=self.log,
authenticator=authenticator)
self.thread.start()
# Event.wait:Changed in version 2.7: Previously, the method always returned None.
if sys.version_info < (2, 7):
self.thread.started.wait(timeout=10)
else:
if not self.thread.started.wait(timeout=10):
raise RuntimeError("Authenticator thread failed to start")
class AsyncioAuthenticator(MultiZapAuthenticator):
"""ZAP authentication for use in the asyncio IO loop"""
def __init__(self, context=None, loop=None):
super().__init__(context)
self.loop = loop or asyncio.get_event_loop()
self.__poller = None
self.__task = None
# TODO: Remove this commented method later
# @asyncio.coroutine
# def __handle_zap(self):
# while True:
# events = yield from self.__poller.poll()
# if self.zap_socket in dict(events):
# msg = yield from self.zap_socket.recv_multipart()
# self.handle_zap_message(msg)
async def __handle_zap(self):
while True:
events = await self.__poller.poll()
if self.zap_socket in dict(events):
msg = await self.zap_socket.recv_multipart()
self.handle_zap_message(msg)
def start(self):
"""Start ZAP authentication"""
super().start()
self.__poller = zmq.asyncio.Poller()
self.__poller.register(self.zap_socket, zmq.POLLIN)
self.__task = asyncio.ensure_future(self.__handle_zap())
def stop(self):
"""Stop ZAP authentication"""
if self.__task:
self.__task.cancel()
if self.__poller:
self.__poller.unregister(self.zap_socket)
self.__poller = None
super().stop()
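# Rough usage sketch for AsyncioAuthenticator (allow/configure_plain are the
# standard zmq.auth.Authenticator methods inherited unchanged; everything else
# here is an assumption, not part of this module):
#   auth = AsyncioAuthenticator(zmq.asyncio.Context.instance())
#   auth.start()
#   auth.allow('127.0.0.1')
#   auth.configure_plain(domain='*', passwords={'user': 'secret'})
#   ...
#   auth.stop()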
|
StarcoderdataPython
|
3270380
|
<filename>quidel/tests/test_handle_wip_sensor.py
import unittest
from delphi_quidel.handle_wip_sensor import add_prefix
from delphi_quidel.constants import SENSORS
class MyTestCase(unittest.TestCase):
def test_handle_wip_sensor(self):
# Test wip_signal = True, Add prefix to all signals
sensors = list(SENSORS.keys())
signal_names = add_prefix(sensors, True, prefix="wip_")
assert all(s.startswith("wip_") for s in signal_names)
# Test wip_signal = list, Add prefix to signal list
signal_names = add_prefix(sensors, [sensors[0]], prefix="wip_")
assert signal_names[0].startswith("wip_")
assert all(not s.startswith("wip_") for s in signal_names[1:])
# Test wip_signal = False, Add prefix to unpublished signals
signal_names = add_prefix(["xyzzy", sensors[0]], False, prefix="wip_")
assert signal_names[0].startswith("wip_")
assert all(not s.startswith("wip_") for s in signal_names[1:])
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
27163
|
<reponame>raimota/Gerador-Validador-CPF_CNPJ<filename>app/models/forms.py
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.validators import DataRequired
class Campos(FlaskForm):
es = StringField('es')
|
StarcoderdataPython
|
1608751
|
<gh_stars>0
import sys, csv, getopt, codecs, datetime
from collections import OrderedDict
class Day(object):
def __init__(self, date, distance):
self.date = date
self.distance = distance
def csv_day(self):
return [self.date, self.distance]
def get_date(entry):
date_str = entry.split(';')[1]
date = datetime.datetime.strptime(date_str, "%m/%d/%y").date()
return date
def get_distance(entry):
return float(entry.split(';')[2])
def parse_csv(inputfile):
reader = csv.reader(codecs.open(inputfile,'rU','utf-16'))
journal = {}
for line in reader:
distance = 0
if len(line) == 1:
continue
date = get_date(line[0])
try:
distance = get_distance(line[4])
except:
pass
if date in journal:
journal[date].distance += distance
else:
journal[date] = Day(date, distance)
return journal
def main(argv):
inputfile = ''
outputfile = ''
verbose = False
try:
opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
print 'tripcsv.py -i <inputfile> -o <outputfile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'tripcsv.py -i <inputfile> -o <outputfile>'
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
output = parse_csv(inputfile)
    if verbose:
        for date, entry in output.items():
            print date, ": ", entry.distance
else:
print "Date,Distance"
        for date, entry in OrderedDict(sorted(output.items(), key=lambda t: t[0])).items():
            csv_day = entry.csv_day()
            print "{0},{1}".format(csv_day[0], csv_day[1])
if __name__ == "__main__":
main(sys.argv[1:])
|
StarcoderdataPython
|
3352533
|
from unittest import TestCase
from Cuenta import Cuenta
class TestCuenta(TestCase):
def test_depositar(self):
C = Cuenta("Mayorquin", "0", 0.0)
self.assertEqual(C.depositar(5000), 5000)
def test_retirar(self):
C= Cuenta("mayorquin", "0", 500.00)
self.assertEqual(C.retirar(200), 300)
|
StarcoderdataPython
|
114799
|
# This file implements spiking neural networks as described
# in the work:
# <NAME>, Coarse scale representation of spiking neural networks:
# backpropagation through spikes and applications to neuromorphic hardware,
# International Conference on Neuromorphic Systems (ICONS), 2020
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
import math as m
from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes
class SpikingShallowNetwork(nn.Module):
def __init__(self, Nin, Nout, Nsp, t1, beta=5, scale=1):
super(SpikingShallowNetwork, self).__init__()
self.Nsp = Nsp
self.Nout = Nout
self.Nin = Nin
self.l1 = nn.Linear(self.Nin, self.Nout, bias=None)
# torch.nn.init.constant_(self.l1.weight, 0.0005)
self.sl = SpikingLayer(t1, beta=beta)
self.scale = scale
def forward(self, x, device):
x = x.view(-1, 28*28)
s = torch.zeros(x.shape[0], self.Nout).to(device)
v = torch.zeros(x.shape[0], self.Nout).to(device)
nsp = torch.zeros(x.shape[0], self.Nout).to(device)
for i in range(self.Nsp):
xi = poisson_spikes(x, self.scale).to(device)
xi = self.l1(xi)
s, v = self.sl(xi, s, v)
nsp += s
return nsp
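# Shape sketch (sizes assumed from the MNIST pipeline in main() below):
#   device = torch.device("cpu")
#   net = SpikingShallowNetwork(Nin=784, Nout=10, Nsp=8, t1=4)
#   x = torch.rand(32, 1, 28, 28)     # MNIST-like batch
#   spike_counts = net(x, device)     # (32, 10) accumulated output spikes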
class SpikingHiddenNetwork(nn.Module):
def __init__(self, Nin, Nhid, Nout, Nsp, t1, t2, beta=5, scale=1):
super(SpikingHiddenNetwork, self).__init__()
self.Nsp = Nsp
self.Nhid = Nhid
self.Nout = Nout
self.l1 = nn.Linear(Nin, self.Nhid)
self.l2 = nn.Linear(self.Nhid, self.Nout, bias=None)
self.sl1 = SpikingLayer(t1, beta=beta)
self.sl2 = SpikingLayer(t2, beta=beta)
self.scale = scale
def forward(self, x, device):
x = x.view(-1, 28*28)
s1 = torch.zeros(x.shape[0], self.Nhid).to(device)
v1 = torch.zeros(x.shape[0], self.Nhid).to(device)
s2 = torch.zeros(x.shape[0], self.Nout).to(device)
v2 = torch.zeros(x.shape[0], self.Nout).to(device)
nsp = torch.zeros(x.shape[0], self.Nout).to(device)
for i in range(self.Nsp):
xi = poisson_spikes(x, self.scale).to(device)
s1, v1 = self.sl1(self.l1(xi), s1, v1)
xi = self.l2(s1)
s2, v2 = self.sl2(xi, s2, v2)
nsp += s2
return nsp
class SpikingConvNetwork(nn.Module):
def __init__(self, Nin, Nout, Nsp, t1, t2, beta=5, scale=1):
super(SpikingConvNetwork, self).__init__()
self.Nsp = Nsp
self.Nout = Nout
self.Nin = Nin
self.Nhid = 784
self.conv1 = nn.Conv2d(1, 4, (5,5), stride=2, padding=2)
self.l1 = nn.Linear(self.Nhid, self.Nout, bias=None)
self.sl1 = SpikingLayer(t1, beta=beta)
self.sl2 = SpikingLayer(t2, beta=beta)
self.scale = scale
def forward(self, x, device):
s1 = torch.zeros(x.shape[0], self.Nhid).to(device)
v1 = torch.zeros(x.shape[0], self.Nhid).to(device)
s2 = torch.zeros(x.shape[0], self.Nout).to(device)
v2 = torch.zeros(x.shape[0], self.Nout).to(device)
nsp = torch.zeros(x.shape[0], self.Nout).to(device)
for i in range(self.Nsp):
xi = poisson_spikes(x, self.scale).to(device)
xi = self.conv1(xi)
xi = xi.view(xi.shape[0],-1)
s1, v1 = self.sl1(xi, s1, v1)
xi2 = self.l1(s1)
s2, v2 = self.sl2(xi2, s2, v2)
nsp += s2
return nsp
class SpikingConvNetwork2(nn.Module):
def __init__(self, Nin, Nout, Nsp, t1, t2, beta=5, scale=1):
super(SpikingConvNetwork2, self).__init__()
self.Nsp = Nsp
self.Nout = Nout
self.Nin = Nin
self.Nhid1 = Nin
self.Nhid2 = 600
self.scale = scale
self.conv1 = nn.Conv2d(1, 4, (5,5), stride=2, padding=2)
self.l1 = nn.Linear(self.Nhid2, self.Nout, bias=None)
self.conv2 = nn.Conv2d(4, 6, (5,5), stride=1, padding=0)
self.sl1 = SpikingLayer(t1, beta=beta)
self.sl2 = SpikingLayer(t1, beta=beta)
self.sl3 = SpikingLayer(t2, beta=beta)
def forward(self, x, device):
s1 = torch.zeros(x.shape[0], 4, 14, 14).to(device)
v1 = torch.zeros(x.shape[0], 4, 14, 14).to(device)
s2 = torch.zeros(x.shape[0], 6, 10, 10).to(device)
v2 = torch.zeros(x.shape[0], 6, 10, 10).to(device)
s3 = torch.zeros(x.shape[0], self.Nout).to(device)
v3 = torch.zeros(x.shape[0], self.Nout).to(device)
nsp = torch.zeros(x.shape[0], self.Nout).to(device)
for i in range(self.Nsp):
xi = poisson_spikes(x,self.scale).to(device)
xi = self.conv1(xi)
s1, v1 = self.sl1(xi, s1, v1)
xi = self.conv2(s1)
s2, v2 = self.sl2(xi, s2, v2)
xi = s2.view(s2.shape[0],-1)
xi2 = self.l1(xi)
s3, v3 = self.sl3(xi2, s3, v3)
nsp += s3
return nsp
class SpikingLeNet5(nn.Module):
def __init__(self, Nsp, t1, t2, beta=5, scale=1):
self.Nsp = Nsp
super(SpikingLeNet5, self).__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=6,
kernel_size=5, stride=1, padding=2, bias=True)
self.max_pool_1 = nn.MaxPool2d(kernel_size=2)
self.conv2 = nn.Conv2d(in_channels=6, out_channels=16,
kernel_size=5, stride=1, padding=0, bias=True)
self.max_pool_2 = nn.MaxPool2d(kernel_size=2)
self.fc1 = nn.Linear(16*5*5, 120, bias=False)
self.fc2 = nn.Linear(120,84, bias=False)
self.fc3 = nn.Linear(84,10, bias=False)
self.sl1 = SpikingLayer(t1, beta=beta)
self.sl2 = SpikingLayer(t1, beta=beta)
self.sl3 = SpikingLayer(t2, beta=beta)
self.sl4 = SpikingLayer(t2, beta=beta)
self.sl5 = SpikingLayer(t2, beta=beta)
self.scale = scale
def build_x(self, x):
xi = torch.zeros_like(x)
xout = torch.rand_like(x)
xout[xout>self.scale*x] = 0.0
xout[xout>0] = 1.0
return xout
def forward(self, x, device):
s1 = torch.zeros(x.shape[0], 6, 28, 28).to(device)
v1 = torch.zeros(x.shape[0], 6, 28, 28).to(device)
s2 = torch.zeros(x.shape[0], 16, 10, 10).to(device)
v2 = torch.zeros(x.shape[0], 16, 10, 10).to(device)
s3 = torch.zeros(x.shape[0], 120).to(device)
v3 = torch.zeros(x.shape[0], 120).to(device)
s4 = torch.zeros(x.shape[0], 84).to(device)
v4 = torch.zeros(x.shape[0], 84).to(device)
s5 = torch.zeros(x.shape[0], 10).to(device)
v5 = torch.zeros(x.shape[0], 10).to(device)
nsp = torch.zeros(x.shape[0], 10).to(device)
for i in range(self.Nsp):
xi = self.build_x(x).to(device)
xi = self.conv1(xi)
s1, v1 = self.sl1(xi, s1, v1)
xi = self.max_pool_1(s1)
xi = self.conv2(xi)
s2, v2 = self.sl2(xi, s2, v2)
xi = self.max_pool_2(s2)
xi = xi.view(xi.shape[0],-1)
xi = self.fc1(xi)
s3, v3 = self.sl3(xi, s3, v3)
xi = self.fc2(s3)
s4, v4 = self.sl4(xi, s4, v4)
xi = self.fc3(s4)
s5, v5 = self.sl5(xi, s5, v5)
nsp += s5
return nsp
class SpikingLeNet5const(nn.Module):
def __init__(self, Nsp, t0, t1, t2, beta=5, scale=1):
self.Nsp = Nsp
super(SpikingLeNet5const, self).__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=6,
kernel_size=5, stride=1, padding=2, bias=True)
self.max_pool_1 = nn.MaxPool2d(kernel_size=2)
self.conv2 = nn.Conv2d(in_channels=6, out_channels=16,
kernel_size=5, stride=1, padding=0, bias=True)
self.max_pool_2 = nn.MaxPool2d(kernel_size=2)
self.fc1 = nn.Linear(16*5*5, 120, bias=False)
self.fc2 = nn.Linear(120,84, bias=False)
self.fc3 = nn.Linear(84,10, bias=False)
self.sl0 = SpikingVextLayer(t0, beta=beta)
self.sl1 = SpikingLayer(t1, beta=beta)
self.sl2 = SpikingLayer(t1, beta=beta)
self.sl3 = SpikingLayer(t2, beta=beta)
self.sl4 = SpikingLayer(t2, beta=beta)
self.sl5 = SpikingLayer(t2, beta=beta)
self.scale = scale
def build_x(self, x):
xi = torch.zeros_like(x)
xout = torch.rand_like(x)
xout[xout>self.scale*x] = 0.0
xout[xout>0] = 1.0
return xout
def forward(self, x, device):
s0 = torch.zeros(x.shape[0], 1, 28, 28).to(device)
v0 = torch.zeros(x.shape[0], 1, 28, 28).to(device)
s1 = torch.zeros(x.shape[0], 6, 28, 28).to(device)
v1 = torch.zeros(x.shape[0], 6, 28, 28).to(device)
s2 = torch.zeros(x.shape[0], 16, 10, 10).to(device)
v2 = torch.zeros(x.shape[0], 16, 10, 10).to(device)
s3 = torch.zeros(x.shape[0], 120).to(device)
v3 = torch.zeros(x.shape[0], 120).to(device)
s4 = torch.zeros(x.shape[0], 84).to(device)
v4 = torch.zeros(x.shape[0], 84).to(device)
s5 = torch.zeros(x.shape[0], 10).to(device)
v5 = torch.zeros(x.shape[0], 10).to(device)
nsp = torch.zeros(x.shape[0], 10).to(device)
for i in range(self.Nsp):
s0, v0 = self.sl0(x, s0, v0)
xi = self.conv1(s0)
s1, v1 = self.sl1(xi, s1, v1)
xi = self.max_pool_1(s1)
xi = self.conv2(xi)
s2, v2 = self.sl2(xi, s2, v2)
xi = self.max_pool_2(s2)
xi = xi.view(xi.shape[0],-1)
xi = self.fc1(xi)
s3, v3 = self.sl3(xi, s3, v3)
xi = self.fc2(s3)
s4, v4 = self.sl4(xi, s4, v4)
xi = self.fc3(s4)
s5, v5 = self.sl5(xi, s5, v5)
nsp += s5
return nsp
def train(args, model, device, train_loader, optimizer, epoch, scale=4):
model.train()
Nsp = model.Nsp
for batch_idx, (data, target) in enumerate(train_loader):
bsize = target.shape[0]
optimizer.zero_grad()
mtarget = target
mdata = data
data, mtarget = mdata.to(device), mtarget.to(device)
output = scale*(model(data, device)-0.5*model.Nsp)
loss = F.cross_entropy(output, mtarget)
loss.backward()
optimizer.step()
if batch_idx % 5 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
# print(output)
def test(args, model, device, test_loader, scale=4):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
mtarget = target
mdata = data
data, mtarget = mdata.to(device), mtarget.to(device)
output = scale*(model(data, device)-0.5*model.Nsp)
test_loss += F.cross_entropy(output, mtarget).item()
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred).to(device)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return 100. * correct / len(test_loader.dataset)
def train_mse(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
bsize = target.shape[0]
optimizer.zero_grad()
mtarget = torch.zeros(target.shape[0],10)
for i in range(target.shape[0]):
mtarget[i,target[i]]= args.spikes
data, target = data.to(device), mtarget.to(device)
output = model(data, device)
loss = F.mse_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % 5 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
# print(output)
def test_mse(args, model, device, test_loader):
model.eval()
Nst = model.Nsp
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
mtarget = torch.zeros(target.shape[0],10)
for i in range(target.shape[0]):
mtarget[i,target[i]]= args.spikes
data, mtarget = data.to(device), mtarget.to(device)
output = model(data, device)
test_loss += F.mse_loss(output, mtarget, reduction='sum').item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred).to(device)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return 100. * correct / len(test_loader.dataset)
def main():
# Training settings
parser = argparse.ArgumentParser(description='SpikingNet example')
parser.add_argument('name', metavar='N', type=str, nargs=1,
help='filename')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.001)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--dataset', type=int, default=0, metavar='N',
help='dataset: mnist-0 fashionmnist-1')
parser.add_argument('--length', type=int, default=8, metavar='N',
help='length: (default: 8)')
parser.add_argument('--leakage1', type=int, default=4, metavar='N',
help='leakage2: (default: 4)')
parser.add_argument('--leakage2', type=int, default=4, metavar='N',
help='leakage1: (default: 4)')
parser.add_argument('--leakage0', type=int, default=4, metavar='N',
help='leakage0: (default: 4)')
parser.add_argument('--beta', type=float, default=5.0, metavar='N',
help='beta: (default: 5.0)')
parser.add_argument('--scale', type=float, default=1.0, metavar='N',
help='scale: (default: 1.0)')
parser.add_argument('--cost', type=int, default=1, metavar='N',
help='cost function 0 - xent, 1 - mse: (default: 1)')
parser.add_argument('--spikes', type=int, default=4, metavar='N',
help='# output spikes in mse: (default: 4)')
parser.add_argument('--model', type=int, default=0, metavar='N',
help='model: shallow-0 hidden-1 conv1-2 conv2-3 \
Lenet5-4 Lenet5const-5')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
if args.dataset == 1:
train_loader = torch.utils.data.DataLoader(
datasets.FashionMNIST('fashionMNIST', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor()
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.FashionMNIST('fashionMNIST', train=False, transform=transforms.Compose([
transforms.ToTensor()])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
else:
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('MNIST', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor()
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('MNIST', train=False, transform=transforms.Compose([
transforms.ToTensor()
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
if args.model == 0:
model = SpikingShallowNetwork(784, 10, args.length, args.leakage1,
beta=args.beta, scale=args.scale).to(device)
elif args.model == 1:
        # Constructor order is (Nin, Nhid, Nout): 30 hidden units, 10 outputs.
        model = SpikingHiddenNetwork(784, 30, 10, args.length, args.leakage1,
                args.leakage2, beta=args.beta, scale=args.scale).to(device)
elif args.model == 2:
model = SpikingConvNetwork(784, 10, args.length, args.leakage1,
args.leakage2, beta=args.beta, scale=args.scale).to(device)
elif args.model == 3:
model = SpikingConvNetwork2(784, 10, args.length, args.leakage1,
args.leakage2, beta=args.beta, scale=args.scale).to(device)
elif args.model == 4:
model = SpikingLeNet5(args.length, args.leakage1, args.leakage2,
beta=args.beta, scale=args.scale).to(device)
elif args.model == 5:
model = SpikingLeNet5const(args.length, args.leakage0, args.leakage1,
args.leakage2, beta=args.beta, scale=args.scale).to(device)
if args.cost == 0:
trainf = train
testf = test
else:
trainf = train_mse
testf = test_mse
optimizer = optim.Adam(model.parameters(), lr=args.lr*8/args.length)
data = []
for epoch in range(1, args.epochs + 1):
trainf(args, model, device, train_loader, optimizer, epoch)
result = testf(args, model, device, test_loader)
data.append([epoch, result])
data = np.array(data)
# condstring = "%d_%d_%d_%d_%d" % (args.length, args.leakage0, args.leakage1, args.leakage2, args.dataset)
condstring = "%d_%d_%d_%d" % (args.length, args.leakage1, args.beta, args.dataset)
filename = args.name[0] + "_" + condstring + ".npy"
filemode = args.name[0] + "_" + condstring + ".pt"
np.save(filename, data)
torch.save(model.state_dict(), filemode)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1781807
|
from abc import ABC, abstractmethod
class Debuggable(ABC):
"""
The Debuggable "mixin" marks a class as having the debug_str method, allowing other functions to print this
information when needed.
"""
@abstractmethod
def debug_str(self):
"""A debug string is used for providing better error messages during both parsing and at runtime."""
|
StarcoderdataPython
|
51479
|
from django.core.exceptions import PermissionDenied
from django.utils import timezone
from .models import Item
from datetime import datetime
def active_auction(function):
def wrap(request, *args, **kwargs):
item = Item.objects.get(slug=kwargs['slug'])
if item.end_of_auction > timezone.now():
return function(request, *args, **kwargs)
else:
raise PermissionDenied
wrap.__doc__ = function.__doc__
wrap.__name__ = function.__name__
return wrap
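# Usage sketch (the view below is hypothetical; the decorator only requires a
# 'slug' keyword argument that resolves to an Item):
#   @active_auction
#   def place_bid(request, slug):
#       ...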
|
StarcoderdataPython
|
1647960
|
<filename>python/hash_tailored_audience_file.py
#!/usr/bin/env python
"""
Copyright (C) 2014-2016 Twitter Inc and other contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import csv
import re
import hashlib
import sys
def setup(args, flags):
"""Sets up arguments and flags for processing hashes.
    Args:
        args: parsed command-line arguments providing type, infile and outfile
        flags: dict of processing flags populated by this function
    Returns:
        bool: True if the type is valid and flags were configured, False otherwise
"""
    if args.type == 'MOBILEDEVICEID':
        # mobile device IDs can be a mixture of IDFA, ADID and ANDROID in a single file
        flags['regex'] = re.compile(r'^[a-z0-9][a-z0-9\-]+[a-z0-9]$')
    elif args.type == 'IDFA':
        # flags['uppercase'] = True
        flags['regex'] = re.compile(r'^[a-z0-9][a-z0-9\-]+[a-z0-9]$')
    elif args.type == 'ADID':
        flags['regex'] = re.compile(r'^[a-z0-9][a-z0-9\-]+[a-z0-9]$')
    elif args.type == 'ANDROID':
        flags['regex'] = re.compile(r'^[a-z0-9]+$')
    elif args.type == 'EMAIL':
        flags['regex'] = re.compile(r'^[a-z0-9][a-z0-9_\-\.\+]+\@[a-z0-9][a-z0-9\.]+[a-z]$')
    elif args.type == 'PHONE' or args.type == 'TWITTERID':
        flags['dropleadingzeros'] = True
        flags['regex'] = re.compile(r'^\d+$')
    elif args.type == 'TWITTERSCREENNAME':
        flags['dropleadingat'] = True
        flags['regex'] = re.compile(r'^[a-z0-9_]+$')
else:
# There is an invalid type.
print ("ERROR: invalid type")
return False
# Flags should be correctly set if so return a true value
return True
def hashFile(args, flags):
"""Hashes the file based on the params setup in args.
Args:
args: named to setup type, infile and outfile
Returns:
dict: {"written": N, "skipped": N}
"""
skipped = 0
written = 0
if args.infile.name.endswith(".csv"):
csv_file = True
reader = csv.reader(args.infile, dialect='excel')
else:
csv_file = False
reader = args.infile
for text in reader:
if not csv_file:
text = [text]
for line in text:
if not line:
break
line = line.rstrip()
# Remove whitespace
line = ''.join(line.split())
# Set case
if flags['uppercase']:
line = line.upper()
else:
line = line.lower()
# Drop leading '@'
if flags['dropleadingat']:
line = line.lstrip('@')
# Drop leading zeros
if flags['dropleadingzeros']:
line = line.lstrip('0')
if flags['regex'].match(line) is None:
skipped += 1
continue
if debug:
print ("\t" + line)
            hashed = hashlib.sha256(line.encode('utf-8')).hexdigest()
args.outfile.write(hashed + "\n")
written += 1
# Close --infile and --outfile
args.infile.close()
args.outfile.close()
hash_info = {"written": written, "skipped": skipped}
return hash_info
if __name__ == "__main__":
debug = False
parser = argparse.ArgumentParser(description='Hash the contents of a file for TA upload.')
aud_types = ['MOBILEDEVICEID',
'IDFA',
'ADID',
'ANDROID',
'EMAIL',
'PHONE',
'TWITTERID',
'TWITTERSCREENNAME']
# Set the type.
parser.add_argument('--type', required=True, metavar='TWITTERID', help='source data type.',
choices=aud_types)
# parse --infile e.g. the in location of the file.
parser.add_argument('--infile', required=True, type=argparse.FileType('rU'),
metavar='/path/to/source.txt', help='input file to parse.')
# parse --outfile e.g. the location of the file
parser.add_argument('--outfile', required=True, type=argparse.FileType('w'),
metavar='/path/to/output.txt', help='file to write output.')
# Parse the arguments from the command to the variable args.
args = parser.parse_args()
# Setup a dictionary with Flags
flags = {'uppercase': False, 'dropleadingzeros': False, 'dropleadingat': False}
# If setup is correctly configured..
if setup(args, flags) is True:
# Run the hashFile function with the variables
hashed_info = hashFile(args, flags)
print ("Written:\t" + str(hashed_info['written']))
print ("Skipped:\t" + str(hashed_info['skipped']))
# Exit
sys.exit()
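# Example invocation (paths are illustrative):
#   python hash_tailored_audience_file.py --type EMAIL \
#       --infile /path/to/source.txt --outfile /path/to/output.txt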
|
StarcoderdataPython
|
3342551
|
# challenge 15
c = float(input('Enter the temperature in ºC: '))
f = (c*9/5)+32
k = c+273.15
print('The temperature is {}ºF and {}K' .format(f, k))
# challenge 15
km = float(input('Enter how many km the car travelled: '))
dias = float(input('Enter for how many days the car was rented: '))
aluguel = (dias*60)+(km*0.15)
print('The amount to pay is R${:.2f}' .format(aluguel))
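# Worked example: 3 days and 200 km -> 3*60 + 200*0.15 = 180 + 30 = R$210.00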
|
StarcoderdataPython
|
3339271
|
from setuptools import setup
setup(
version="0.0.2",
name="dblpbib",
packages=["dblpbib"],
description="Download all bibtex references for provided author",
author="<NAME>",
author_email="<EMAIL>",
entry_points = {
'console_scripts': [
'dblpbib = dblpbib:main',
],
},
install_requires=["requests"],
)
|
StarcoderdataPython
|
3237630
|
<filename>training/views.py
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import render
# Create your views here.
from django.urls import reverse
from ems_admin.decorators import log_activity
from ems_auth.decorators import hr_required
from organisation_details.decorators import organisationdetail_required
from settings.selectors import get_all_currencies, get_currency
from training.models import Training, TrainingSchedule
from training.selectors import get_all_training_schedules, get_applicant_trainings, \
get_training_schedule, get_pending_training_applications, get_training_application
from training.services import approve_training_application_service, reject_training_application_service
@organisationdetail_required
@log_activity
def user_training_page(request):
applicant = request.user.solitonuser.employee
if request.POST:
programme = request.POST.get('programme')
institution = request.POST.get('institution')
duration = request.POST.get('duration')
cost = request.POST.get('cost')
start_date = request.POST.get('start_date')
end_date = request.POST.get('end_date')
business_case = request.POST.get('business_case')
objectives = request.POST.get('objectives')
preparations = request.POST.get('preparations')
skills = request.POST.get('skills')
knowledge = request.POST.get('knowledge')
currency_id = request.POST.get('currency_id')
currency = get_currency(currency_id)
Training.objects.create(
applicant=applicant,
programme=programme,
institution=institution,
duration=duration,
cost=cost,
start_date=start_date,
end_date=end_date,
business_case=business_case,
objectives=objectives,
preparations=preparations,
skills=skills,
knowledge=knowledge,
currency=currency
)
return HttpResponseRedirect(reverse(user_training_page))
currencies = get_all_currencies()
trainings = get_applicant_trainings(applicant)
context = {
"training_page": "active",
"currencies": currencies,
"trainings": trainings
}
return render(request, 'training/user_training.html', context)
@hr_required
@log_activity
def schedule_training_page(request):
if request.POST:
programme = request.POST.get('programme')
duration = request.POST.get('duration')
venue = request.POST.get('venue')
purpose = request.POST.get('purpose')
date = request.POST.get('date')
TrainingSchedule.objects.create(
programme=programme,
duration=duration,
venue=venue,
purpose=purpose,
date=date
)
return HttpResponseRedirect(reverse(schedule_training_page))
training_schedules = get_all_training_schedules()
context = {
"training_page": "active",
"training_schedules": training_schedules
}
return render(request, 'training/schedule_training.html', context)
@hr_required
@log_activity
def edit_training_schedule(request, training_schedule_id):
if request.POST:
programme = request.POST.get('programme')
duration = request.POST.get('duration')
venue = request.POST.get('venue')
purpose = request.POST.get('purpose')
date = request.POST.get('date')
        training_schedules = TrainingSchedule.objects.filter(id=training_schedule_id)
        training_schedules.update(
programme=programme,
duration=duration,
venue=venue,
purpose=purpose,
date=date
)
return HttpResponseRedirect(reverse(schedule_training_page))
training_schedule = get_training_schedule(training_schedule_id)
context = {
"training_page": "active",
"training_schedule": training_schedule
}
return render(request, "training/edit_training_schedule.html", context)
@hr_required
@log_activity
def delete_training_schedule(request, training_schedule_id):
training_schedule = get_training_schedule(training_schedule_id)
training_schedule.delete()
return HttpResponseRedirect(reverse(schedule_training_page))
@hr_required
@log_activity
def training_schedules_page(request):
training_schedules = get_all_training_schedules()
context = {
"training_page": "active",
"training_schedules": training_schedules
}
return render(request, 'training/training_schedules.html', context)
@hr_required
@organisationdetail_required
@log_activity
def approve_training_page(request):
approver = request.user
pending_applications = get_pending_training_applications(approver)
context = {
"training_page": "active",
"pending_applications": pending_applications
}
return render(request, 'training/approve_training_applications.html', context)
@log_activity
def approve_training_application(request, training_application_id):
approver = request.user
training_application = get_training_application(training_application_id)
approved_training_application = approve_training_application_service(approver, training_application)
if approved_training_application:
messages.success(request, "You approved %s's training application" % approved_training_application.applicant)
else:
messages.error(request, "You are not associated to any role on the system")
return HttpResponseRedirect(reverse('approve_training_page'))
@log_activity
def reject_training_application(request, training_application_id):
rejecter = request.user
training_application = get_training_application(training_application_id)
rejected_training_application = reject_training_application_service(rejecter, training_application)
if rejected_training_application:
messages.success(request, "You rejected %s's training application" % rejected_training_application.applicant)
else:
messages.error(request, "You are not associated to any role on the system")
    return HttpResponseRedirect(reverse('approve_training_page'))
|
StarcoderdataPython
|
3206085
|
import json
import logging
from io import StringIO
from urllib import parse
from bottle import abort
from devmine.lib.composition import rank
from devmine.app.controllers.application_controller import (
ApplicationController,
enable_cors
)
class SearchController(ApplicationController):
"""Class for handling search query."""
def query(self, db, q):
"""Return search result as a JSON string"""
enable_cors()
try:
io = StringIO(parse.unquote(q))
feature_weights = json.load(io)
            ranking, elapsed_time = rank(db, feature_weights)
        except Exception:
logging.exception('SearchController:query')
abort(400, 'Malformed JSON query')
sorted_ranking = sorted(
ranking, key=lambda user: user['rank'], reverse=True)
results = {'results': sorted_ranking[:100],
                   'elapsed_time': "%0.9f" % (elapsed_time)}
return json.dumps(results)
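# Query-string sketch (feature names are illustrative): the route receives q
# URL-encoded, e.g. q=%7B%22popularity%22%3A%202%7D, which parse.unquote turns
# into '{"popularity": 2}' before json.load builds the feature_weights dict.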
|
StarcoderdataPython
|
181770
|
from edge.command.common.precommand_check import check_gcloud_authenticated, check_project_exists, check_billing_enabled
from edge.config import GCProjectConfig, StorageBucketConfig, EdgeConfig
from edge.enable_api import enable_service_api
from edge.exception import EdgeException
from edge.gcloud import is_authenticated, get_gcloud_account, get_gcloud_project, get_gcloud_region, get_gcp_regions, \
project_exists, is_billing_enabled
from edge.state import EdgeState
from edge.storage import setup_storage
from edge.tui import TUI, StepTUI, SubStepTUI, TUIStatus, qmark
from edge.versions import get_gcloud_version, Version, get_kubectl_version, get_helm_version
from edge.path import get_model_dvc_pipeline
import questionary
def edge_init():
success_title = "Initialised successfully"
success_message = f"""
What's next? We suggest you proceed with:
Commit the new vertex:edge configuration to git:
git add edge.yaml && git commit -m "Initialise vertex:edge"
Configure an experiment tracker (optional):
./edge.sh experiments init
Configure data version control:
./edge.sh dvc init
Train and deploy a model (see 'Training a model' section of the README for more details):
./edge.sh model init
dvc repro {get_model_dvc_pipeline()}
./edge.sh model deploy
Happy herding! 🐏
""".strip()
failure_title = "Initialisation failed"
failure_message = "See the errors above. See README for more details."
with TUI(
"Initialising vertex:edge",
success_title,
success_message,
failure_title,
failure_message
) as tui:
with StepTUI(message="Checking your local environment", emoji="🖥️") as step:
with SubStepTUI("Checking gcloud version") as sub_step:
gcloud_version = get_gcloud_version()
expected_gcloud_version_string = "2021.05.21"
expected_gcloud_version = Version.from_string(expected_gcloud_version_string)
if not gcloud_version.is_at_least(expected_gcloud_version):
raise EdgeException(
f"We found gcloud version {str(gcloud_version)}, "
f"but we require at least {str(expected_gcloud_version)}. "
"Update gcloud by running `gcloud components update`."
)
try:
gcloud_alpha_version = get_gcloud_version("alpha")
expected_gcloud_alpha_version_string = "2021.06.00"
expected_gcloud_alpha_version = Version.from_string(expected_gcloud_alpha_version_string)
if not gcloud_alpha_version.is_at_least(expected_gcloud_alpha_version):
raise EdgeException(
f"We found gcloud alpha component version {str(gcloud_alpha_version)}, "
f"but we require at least {str(expected_gcloud_alpha_version)}. "
"Update gcloud by running `gcloud components update`."
)
except KeyError:
raise EdgeException(
f"We couldn't find the gcloud alpha components, "
f"please install these by running `gcloud components install alpha`"
)
with SubStepTUI("Checking kubectl version") as sub_step:
kubectl_version = get_kubectl_version()
expected_kubectl_version_string = "v1.19.0"
expected_kubectl_version = Version.from_string(expected_kubectl_version_string)
if not kubectl_version.is_at_least(expected_kubectl_version):
raise EdgeException(
f"We found gcloud version {str(kubectl_version)}, "
f"but we require at least {str(expected_kubectl_version)}. "
"Please visit https://kubernetes.io/docs/tasks/tools/ for installation instructions."
)
with SubStepTUI("Checking helm version") as sub_step:
helm_version = get_helm_version()
expected_helm_version_string = "v3.5.2"
expected_helm_version = Version.from_string(expected_helm_version_string)
if not helm_version.is_at_least(expected_helm_version):
raise EdgeException(
f"We found gcloud version {str(helm_version)}, "
f"but we require at least {str(expected_helm_version)}. "
"Please visit https://helm.sh/docs/intro/install/ for installation instructions."
)
with StepTUI(message="Checking your GCP environment", emoji="☁️") as step:
check_gcloud_authenticated()
with SubStepTUI(message="Verifying GCloud configuration") as sub_step:
gcloud_account = get_gcloud_account()
if gcloud_account is None or gcloud_account == "":
raise EdgeException(
"gcloud account is unset. "
"Run `gcloud auth login && gcloud auth application-default login` to authenticate "
"with the correct account"
)
gcloud_project = get_gcloud_project()
if gcloud_project is None or gcloud_project == "":
raise EdgeException(
"gcloud project id is unset. "
"Run `gcloud config set project $PROJECT_ID` to set the correct project id"
)
gcloud_region = get_gcloud_region()
if gcloud_region is None or gcloud_region == "":
raise EdgeException(
"gcloud region is unset. "
"Run `gcloud config set compute/region $REGION` to set the correct region"
)
sub_step.update(status=TUIStatus.NEUTRAL)
sub_step.set_dirty()
if not questionary.confirm(f"Is this the correct GCloud account: {gcloud_account}", qmark=qmark).ask():
raise EdgeException(
"Run `gcloud auth login && gcloud auth application-default login` to authenticate "
"with the correct account"
)
if not questionary.confirm(f"Is this the correct project id: {gcloud_project}", qmark=qmark).ask():
raise EdgeException("Run `gcloud config set project <project_id>` to set the correct project id")
if not questionary.confirm(f"Is this the correct region: {gcloud_region}", qmark=qmark).ask():
raise EdgeException("Run `gcloud config set compute/region <region>` to set the correct region")
with SubStepTUI(f"{gcloud_region} is available on Vertex AI") as sub_step:
if gcloud_region not in get_gcp_regions(gcloud_project):
formatted_regions = "\n ".join(get_gcp_regions(gcloud_project))
raise EdgeException(
"Vertex AI only works in certain regions. "
"Please choose one of the following by running `gcloud config set compute/region <region>`:\n"
f" {formatted_regions}"
)
gcloud_config = GCProjectConfig(
project_id=gcloud_project,
region=gcloud_region,
)
check_project_exists(gcloud_project)
check_billing_enabled(gcloud_project)
with StepTUI(message="Initialising Google Storage and vertex:edge state file", emoji="💾") as step:
with SubStepTUI("Enabling Storage API") as sub_step:
enable_service_api("storage-component.googleapis.com", gcloud_project)
with SubStepTUI("Configuring Google Storage bucket", status=TUIStatus.NEUTRAL) as sub_step:
sub_step.set_dirty()
storage_bucket_name = questionary.text(
"Now you need to choose a name for a storage bucket that will be used for data version control, "
"model assets and keeping track of the vertex:edge state\n "
"NOTE: Storage bucket names must be unique and follow certain conventions. "
"Please see the following guidelines for more information "
"https://cloud.google.com/storage/docs/naming-buckets."
"\n Enter Storage bucket name to use: ",
qmark=qmark
).ask().strip()
if storage_bucket_name is None or storage_bucket_name == "":
raise EdgeException("Storage bucket name is required")
storage_config = StorageBucketConfig(
bucket_name=storage_bucket_name,
dvc_store_directory="dvcstore",
vertex_jobs_directory="vertex",
)
storage_state = setup_storage(gcloud_project, gcloud_region, storage_bucket_name)
_state = EdgeState(
storage=storage_state
)
_config = EdgeConfig(
google_cloud_project=gcloud_config,
storage_bucket=storage_config,
)
skip_saving_state = False
with SubStepTUI("Checking if vertex:edge state file exists") as sub_step:
if EdgeState.exists(_config):
sub_step.update(
"The state file already exists. "
"This means that vertex:edge has already been initialised using this storage bucket.",
status=TUIStatus.WARNING
)
sub_step.set_dirty()
if not questionary.confirm(
f"Do you want to delete the state and start over (this action is destructive!)",
qmark=qmark,
default=False,
).ask():
skip_saving_state = True
if skip_saving_state:
with SubStepTUI("Saving state file skipped", status=TUIStatus.WARNING) as sub_step:
pass
else:
with SubStepTUI("Saving state file") as sub_step:
_state.save(_config)
with StepTUI(message="Saving configuration", emoji="⚙️") as step:
with SubStepTUI("Saving configuration to edge.yaml") as sub_step:
_config.save("./edge.yaml")
|
StarcoderdataPython
|
1635170
|
'''
Write a function maxfun() that takes a variable number of parameters: a numeric
sequence S, a function F1 and, possibly, several more functions F2 ... Fn.
It returns the function Fi whose sum of values over all elements of S is the
largest. If more than one function qualifies, the Fi with the largest i is
returned.
Input:
from math import *
print(maxfun(range(-2,10), sin, cos, exp)(1))
Output: 2.718281828459045
'''
from math import *
def maxfun(S, *F):
    # max() keeps the first maximum, so break ties on the index as well to
    # return the Fi with the largest i, as the task requires.
    S = list(S)
    return max(enumerate(F), key=lambda p: (sum(p[1](x) for x in S), p[0]))[1]
|
StarcoderdataPython
|
3359149
|
# Generated by Django 3.0.3 on 2020-09-05 08:06
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('wallet', '0019_position_current_value'),
]
operations = [
migrations.RenameField(
model_name='position',
old_name='current_value',
new_name='total_networth',
),
]
|
StarcoderdataPython
|
3257501
|
import asyncio
from enum import Enum
from sspq import *
from argparse import ArgumentParser, ArgumentTypeError
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self.value > other.value
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self.value <= other.value
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
class LogLevel(OrderedEnum):
FAIL = '1'
WARN = '2'
INFO = '3'
DBUG = '4'
@classmethod
    def parse(cls, string: str) -> 'LogLevel':
_string = string.lower()
if _string == 'fail':
return cls.FAIL
if _string == 'warn':
return cls.WARN
if _string == 'info':
return cls.INFO
if _string == 'dbug':
return cls.DBUG
raise ArgumentTypeError(string + ' is NOT a valid loglevel')
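# Quick illustration of parse() and the OrderedEnum ordering above:
#   LogLevel.parse('warn') is LogLevel.WARN, and LogLevel.WARN < LogLevel.INFO.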
class Server_Client():
"""
This represents a client connected to a server.
"""
def __init__(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
self.reader = reader
self.writer = writer
self.address = writer.get_extra_info('peername')
self.message = None
self.disconnected = False
self.message_event = asyncio.Event()
def get_user_handler(log_level: LogLevel=LogLevel.INFO, retry_override: int=None):
async def user_handler(reader, writer):
client = Server_Client(reader=reader, writer=writer)
if log_level >= LogLevel.INFO:
print(f'User {client.address} connected')
while True:
try:
msg = await read_message(client.reader)
except MessageException as e:
if log_level >= LogLevel.WARN:
print(f'User {client.address} disconnected because: {e}')
client.disconnected = True
client.message_event.set()
client.writer.close()
return
except EOFError:
if log_level >= LogLevel.INFO:
print(f'User {client.address} disconnected')
client.disconnected = True
client.message_event.set()
client.writer.close()
return
if msg.type == MessageType.SEND:
if log_level >= LogLevel.DBUG:
                    print('Received: ' + msg.payload.decode())
if retry_override is not None:
msg.retries = retry_override
await message_queue.put(msg)
elif msg.type == MessageType.RECEIVE:
if client.message is not None:
if log_level >= LogLevel.WARN:
                        print('Receive message is going to be dropped because the client still needs to confirm its current message.')
continue
if log_level >= LogLevel.DBUG:
                    print('User {} wants to receive'.format(str(client.address)))
client.message_event.clear()
await client_queue.put(client)
elif msg.type == MessageType.CONFIRM:
if client.message is None:
if log_level >= LogLevel.WARN:
                        print('Confirm message is going to be dropped because the client has no message to confirm.')
continue
if log_level >= LogLevel.DBUG:
                    print('User {} confirms message'.format(str(client.address)))
client.message = None
client.message_event.set()
await asyncio.sleep(0)
elif msg.type == MessageType.DEAD_RECEIVE:
if client.message is not None:
if log_level >= LogLevel.WARN:
                        print('Dead-receive message is going to be dropped because the client still needs to confirm its current message.')
continue
if log_level >= LogLevel.DBUG:
                    print('User {} wants to dead receive'.format(str(client.address)))
client.message_event.clear()
await dead_letter_client_queue.put(client)
else:
if log_level >= LogLevel.WARN:
print('Received unknown packet:\n' + msg.encode().decode())
await asyncio.sleep(0)
return user_handler
async def message_handler(message: Message, client: Server_Client):
client.message = message
await message.send(client.writer)
await client.message_event.wait()
if client.message is not None:
if client.message.retries == 0:
if not NDLQ:
await dead_letter_queue.put(client.message)
else:
if client.message.retries != 255:
client.message.retries -= 1
await message_queue.put(client.message)
async def queue_handler(loop):
while True:
msg = await message_queue.get()
client = await client_queue.get()
while client.disconnected:
client = await client_queue.get()
asyncio.ensure_future(message_handler(msg, client), loop=loop)
async def dead_letter_handler(message: Message, client: Server_Client):
client.message = message
await message.send(client.writer)
await client.message_event.wait()
if client.message is not None:
await dead_letter_queue.put(message)
async def dead_letter_queue_handler(loop, active: bool=True):
while True:
if active:
msg = await dead_letter_queue.get()
client = await dead_letter_client_queue.get()
while client.disconnected:
client = await dead_letter_client_queue.get()
asyncio.ensure_future(dead_letter_handler(msg, client), loop=loop)
else:
client = await dead_letter_client_queue.get()
await Message(type=MessageType.NO_RECEIVE).send(client.writer)
# Entry Point
if __name__ == "__main__":
# Setup argparse
parser = ArgumentParser(description='SSPQ Server - Super Simple Python Queue Server', add_help=True)
parser.add_argument('--host', action='store', default='127.0.0.1', required=False, help='Set the host address. Use 0.0.0.0 to make the server public', dest='host', metavar='<address>')
parser.add_argument('-p', '--port', action='store', default=SSPQ_PORT, type=int, required=False, help='Set the port the server listens to', dest='port', metavar='<port>')
parser.add_argument('-ll', '--loglevel', action='store', default='info', type=LogLevel.parse, choices=[
LogLevel.FAIL, LogLevel.WARN, LogLevel.INFO, LogLevel.DBUG
], required=False, help='Set the appropriate log level for the output on stdout. Possible values are: [ fail | warn | info | dbug ]', dest='log_level', metavar='<level>')
    parser.add_argument('-ndlq', '--no-dead-letter-queue', action='store_true', required=False, help='Flag to disable the dead letter queueing; failed messages are then simply dropped after the retries run out.', dest='ndlq')
    parser.add_argument('-r', '--force-retries', action='store', type=int, choices=range(0, 256), required=False, help='This overrides the retry value of all incoming packets to the given value. Values between 0 and 254 are possible retry counts; if 255 is used, all packets are retried infinitely.', dest='retry', metavar='[0-255]')
parser.add_argument('-v', '--version', action='version', version='%(prog)s v1.0.0')
args = parser.parse_args()
NDLQ = args.ndlq
# Setup asyncio & queues
loop = asyncio.get_event_loop()
message_queue = asyncio.Queue()
client_queue = asyncio.Queue()
dead_letter_queue = asyncio.Queue()
dead_letter_client_queue = asyncio.Queue()
coro = asyncio.start_server(get_user_handler(log_level=args.log_level, retry_override=args.retry), args.host, args.port, loop=loop)
server = loop.run_until_complete(coro)
queue_worker = asyncio.ensure_future(queue_handler(loop=loop), loop=loop)
dead_letter_queue_worker = asyncio.ensure_future(dead_letter_queue_handler(loop=loop, active=(not args.ndlq)), loop=loop)
# Serve requests until Ctrl+C is pressed
print(f'Serving on {server.sockets[0].getsockname()}')
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
server.close()
queue_worker.cancel()
dead_letter_queue_worker.cancel()
loop.run_until_complete(server.wait_closed())
loop.close()
|
StarcoderdataPython
|
1622722
|
<filename>test/orm/test_lazytest1.py
import sqlalchemy as sa
from sqlalchemy.test import testing
from sqlalchemy import Integer, String, ForeignKey
from sqlalchemy.test.schema import Table
from sqlalchemy.test.schema import Column
from sqlalchemy.orm import mapper, relation, create_session
from test.orm import _base
class LazyTest(_base.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('infos', metadata,
Column('pk', Integer, primary_key=True),
Column('info', String(128)))
Table('data', metadata,
Column('data_pk', Integer, primary_key=True),
Column('info_pk', Integer,
ForeignKey('infos.pk')),
Column('timeval', Integer),
Column('data_val', String(128)))
Table('rels', metadata,
Column('rel_pk', Integer, primary_key=True),
Column('info_pk', Integer,
ForeignKey('infos.pk')),
Column('start', Integer),
Column('finish', Integer))
@classmethod
@testing.resolve_artifact_names
def insert_data(cls):
infos.insert().execute(
{'pk':1, 'info':'pk_1_info'},
{'pk':2, 'info':'pk_2_info'},
{'pk':3, 'info':'pk_3_info'},
{'pk':4, 'info':'pk_4_info'},
{'pk':5, 'info':'pk_5_info'})
rels.insert().execute(
{'rel_pk':1, 'info_pk':1, 'start':10, 'finish':19},
{'rel_pk':2, 'info_pk':1, 'start':100, 'finish':199},
{'rel_pk':3, 'info_pk':2, 'start':20, 'finish':29},
{'rel_pk':4, 'info_pk':3, 'start':13, 'finish':23},
{'rel_pk':5, 'info_pk':5, 'start':15, 'finish':25})
data.insert().execute(
{'data_pk':1, 'info_pk':1, 'timeval':11, 'data_val':'11_data'},
{'data_pk':2, 'info_pk':1, 'timeval':9, 'data_val':'9_data'},
{'data_pk':3, 'info_pk':1, 'timeval':13, 'data_val':'13_data'},
{'data_pk':4, 'info_pk':2, 'timeval':23, 'data_val':'23_data'},
{'data_pk':5, 'info_pk':2, 'timeval':13, 'data_val':'13_data'},
{'data_pk':6, 'info_pk':1, 'timeval':15, 'data_val':'15_data'})
@testing.resolve_artifact_names
def testone(self):
"""A lazy load which has multiple join conditions.
Including two that are against the same column in the child table.
"""
class Information(object):
pass
class Relation(object):
pass
class Data(object):
pass
session = create_session()
mapper(Data, data)
mapper(Relation, rels, properties={
'datas': relation(Data,
primaryjoin=sa.and_(
rels.c.info_pk ==
data.c.info_pk,
data.c.timeval >= rels.c.start,
data.c.timeval <= rels.c.finish),
foreign_keys=[data.c.info_pk])})
mapper(Information, infos, properties={
'rels': relation(Relation)
})
info = session.query(Information).get(1)
assert info
assert len(info.rels) == 2
assert len(info.rels[0].datas) == 3
|
StarcoderdataPython
|
135611
|
from test_support import verbose, verify
import sys
import new
class Eggs:
def get_yolks(self):
return self.yolks
print 'new.module()'
m = new.module('Spam')
if verbose:
print m
m.Eggs = Eggs
sys.modules['Spam'] = m
import Spam
def get_more_yolks(self):
return self.yolks + 3
print 'new.classobj()'
C = new.classobj('Spam', (Spam.Eggs,), {'get_more_yolks': get_more_yolks})
if verbose:
print C
print 'new.instance()'
c = new.instance(C, {'yolks': 3})
if verbose:
print c
o = new.instance(C)
verify(o.__dict__ == {},
"new __dict__ should be empty")
del o
o = new.instance(C, None)
verify(o.__dict__ == {},
"new __dict__ should be empty")
del o
def break_yolks(self):
self.yolks = self.yolks - 2
print 'new.instancemethod()'
im = new.instancemethod(break_yolks, c, C)
if verbose:
print im
verify(c.get_yolks() == 3 and c.get_more_yolks() == 6,
'Broken call of hand-crafted class instance')
im()
verify(c.get_yolks() == 1 and c.get_more_yolks() == 4,
'Broken call of hand-crafted instance method')
# It's unclear what the semantics should be for a code object compiled at
# module scope, but bound and run in a function. In CPython, `c' is global
# (by accident?) while in Jython, `c' is local. The intent of the test
# clearly is to make `c' global, so let's be explicit about it.
codestr = '''
global c
a = 1
b = 2
c = a + b
'''
ccode = compile(codestr, '<string>', 'exec')
# Jython doesn't have a __builtins__, so use a portable alternative
import __builtin__
g = {'c': 0, '__builtins__': __builtin__}
# this test could be more robust
print 'new.function()'
func = new.function(ccode, g)
if verbose:
print func
func()
verify(g['c'] == 3,
'Could not create a proper function object')
# bogus test of new.code()
# Note: Jython will never have new.code()
if hasattr(new, 'code'):
print 'new.code()'
d = new.code(3, 3, 3, 3, codestr, (), (), (),
"<string>", "<name>", 1, "", (), ())
# test backwards-compatibility version with no freevars or cellvars
d = new.code(3, 3, 3, 3, codestr, (), (), (),
"<string>", "<name>", 1, "")
if verbose:
print d
|
StarcoderdataPython
|
141563
|
<reponame>84KaliPleXon3/micropython-esp32
# Calling an inherited classmethod
class Base:
@classmethod
def foo(cls):
print(cls.__name__)
try:
Base.__name__
except AttributeError:
import sys
print("SKIP")
sys.exit()
class Sub(Base):
pass
Sub.foo()
# overriding a member and accessing it via a classmethod
class A(object):
foo = 0
@classmethod
def bar(cls):
print(cls.foo)
def baz(self):
print(self.foo)
class B(A):
foo = 1
B.bar() # class calling classmethod
B().bar() # instance calling classmethod
B().baz() # instance calling normal method
|
StarcoderdataPython
|
54633
|
<gh_stars>1-10
from __future__ import annotations
import typing
from dsalgo.algebra.abstract.abstract_structure import Monoid
from dsalgo.number_theory.floor_sqrt import floor_sqrt
S = typing.TypeVar("S")
class SqrtDecomposition(typing.Generic[S]):
def __init__(self, monoid: Monoid[S], arr: list[S]) -> None:
n = len(arr)
sqrt = floor_sqrt(n)
num_buckets = (n + sqrt - 1) // sqrt
buckets = [monoid.e() for _ in range(num_buckets)]
data_size = sqrt * num_buckets
data = [monoid.e() for _ in range(data_size)]
data[:n] = arr.copy()
for i in range(num_buckets):
for j in range(sqrt * i, sqrt * (i + 1)):
buckets[i] = monoid.op(buckets[i], data[j])
self.__data = data
self.__buckets = buckets
self.__sqrt = sqrt
self.__original_size = n
self.__monoid = monoid
def __len__(self) -> int:
return self.__original_size
def __setitem__(self, i: int, x: S) -> None:
assert 0 <= i < len(self)
self.__data[i] = x
idx = i // self.__sqrt
self.__buckets[idx] = self.__monoid.e()
for j in range(self.__sqrt * idx, self.__sqrt * (idx + 1)):
self.__buckets[idx] = self.__monoid.op(
self.__buckets[idx],
self.__data[j],
)
def __getitem__(self, i: int) -> S:
assert 0 <= i < len(self)
return self.__data[i]
def get(self, left: int, right: int) -> S:
assert 0 <= left <= right <= len(self)
v = self.__monoid.e()
for i in range(len(self.__buckets)):
if left >= self.__sqrt * (i + 1):
continue
if right <= self.__sqrt * i:
break
if left <= self.__sqrt * i and self.__sqrt * (i + 1) <= right:
v = self.__monoid.op(v, self.__buckets[i])
continue
for j in range(self.__sqrt * i, self.__sqrt * (i + 1)):
if j < left:
continue
if j >= right:
break
v = self.__monoid.op(v, self.__data[j])
return v
# a = list(range(10))
# m = Monoid[int](op=lambda x, y: x + y, e=lambda: 0)
# sd = SqrtDecomposition[int](m, a)
# print(sd.get(1, 10))
|
StarcoderdataPython
|
136455
|
import boto3
import sure # noqa # pylint: disable=unused-import
from moto import mock_guardduty
@mock_guardduty
def test_create_detector():
client = boto3.client("guardduty", region_name="us-east-1")
response = client.create_detector(
Enable=True,
ClientToken="745645734574758463758",
FindingPublishingFrequency="ONE_HOUR",
DataSources={"S3Logs": {"Enable": True}},
Tags={},
)
response.should.have.key("DetectorId")
response["DetectorId"].shouldnt.equal(None)
@mock_guardduty
def test_create_detector_with_minimal_params():
client = boto3.client("guardduty", region_name="us-east-1")
response = client.create_detector(Enable=True)
response.should.have.key("DetectorId")
response["DetectorId"].shouldnt.equal(None)
@mock_guardduty
def test_list_detectors_initial():
client = boto3.client("guardduty", region_name="us-east-1")
response = client.list_detectors()
response.should.have.key("DetectorIds").equals([])
@mock_guardduty
def test_list_detectors():
client = boto3.client("guardduty", region_name="us-east-1")
d1 = client.create_detector(
Enable=True,
ClientToken="745645734574758463758",
FindingPublishingFrequency="ONE_HOUR",
DataSources={"S3Logs": {"Enable": True}},
Tags={},
)["DetectorId"]
    d2 = client.create_detector(Enable=False)["DetectorId"]
response = client.list_detectors()
response.should.have.key("DetectorIds")
set(response["DetectorIds"]).should.equal({d1, d2})
|
StarcoderdataPython
|
3210914
|
from setuptools import setup
setup(name='kfn',
version='0.1',
description='Kubeflow notebook component builder',
url='https://github.com/bartgras/kf-notebook-component',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['kfn', 'kfn.test'],
install_requires=[
'kfp',
'papermill',
'jupytext',
'nbconvert'
],
zip_safe=False)
|
StarcoderdataPython
|
33106
|
<filename>dynamicserialize/dstypes/gov/noaa/nws/ncep/common/dataplugin/gempak/request/Station.py
# File auto-generated against equivalent DynamicSerialize Java class
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# Sep 16, 2016 pmoyer Generated
import numpy
class Station(object):
def __init__(self):
self.elevation = None
self.state = None
self.stationId = None
self.longitude = None
self.latitude = None
self.wmoIndex = None
self.country = None
def getElevation(self):
return self.elevation
def setElevation(self, elevation):
self.elevation = elevation
def getState(self):
return self.state
def setState(self, state):
self.state = state
def getStationId(self):
return self.stationId
def setStationId(self, stationId):
self.stationId = stationId
def getLongitude(self):
return self.longitude
def setLongitude(self, longitude):
self.longitude = numpy.float64(longitude)
def getLatitude(self):
return self.latitude
def setLatitude(self, latitude):
self.latitude = numpy.float64(latitude)
def getWmoIndex(self):
return self.wmoIndex
def setWmoIndex(self, wmoIndex):
self.wmoIndex = wmoIndex
def getCountry(self):
return self.country
def setCountry(self, country):
self.country = country
|
StarcoderdataPython
|
1653817
|
# coding=utf-8
import re
import logging
import base64
import os
import datetime
from openerp.http import request
import openerp
from .. import client
_logger = logging.getLogger(__name__)
def get_img_data(pic_url):
import requests
headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4',
'Cache-Control': 'no-cache',
'Host': 'mmbiz.qpic.cn',
'Pragma': 'no-cache',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
}
r = requests.get(pic_url,headers=headers,timeout=50)
return r.content
def main(robot):
def input_handle(message, session):
from .. import client
entry = client.wxenv(request.env)
client = entry
serviceid = message.target
openid = message.source
mtype = message.type
_logger.info('>>> wx msg: %s'%message.__dict__)
if message.id==entry.OPENID_LAST.get(openid):
            _logger.info('>>> duplicate WeChat message')
return
entry.OPENID_LAST[openid] = message.id
origin_content = ''
attachment_ids = []
if mtype=='image':
pic_url = message.img
media_id = message.__dict__.get('MediaId','')
_logger.info(pic_url)
_data = get_img_data(pic_url)
_filename = datetime.datetime.now().strftime("%m%d%H%M%S") + os.path.basename(pic_url)
attachment = request.env['ir.attachment'].sudo().create({
'name': '__wx_image|%s'%media_id,
'datas': base64.encodestring(_data),
'datas_fname': _filename,
'res_model': 'mail.compose.message',
'res_id': int(0)
})
attachment_ids.append(attachment.id)
elif mtype in ['voice']:
media_id = message.media_id
media_format = message.format
r = client.wxclient.download_media(media_id)
_filename = '%s.%s'%(media_id,media_format)
_data = r.content
attachment = request.env['ir.attachment'].sudo().create({
'name': '__wx_voice|%s'%message.media_id,
'datas': base64.encodestring(_data),
'datas_fname': _filename,
'res_model': 'mail.compose.message',
'res_id': int(0)
})
attachment_ids.append(attachment.id)
elif mtype=='location':
            origin_content = 'Sender shared a location: %s latitude: %s longitude: %s'%(message.label, message.location[0], message.location[1])
elif mtype=='text':
origin_content = message.content
content = origin_content.lower()
rs = request.env()['wx.autoreply'].sudo().search([])
for rc in rs:
_key = rc.key.lower()
if rc.type==1:
if content==_key:
ret_msg = rc.action.get_wx_reply()
return entry.create_reply(ret_msg, message)
elif rc.type==2:
if _key in content:
ret_msg = rc.action.get_wx_reply()
return entry.create_reply(ret_msg, message)
elif rc.type==3:
try:
flag = re.compile(_key).match(content)
except:flag=False
if flag:
ret_msg = rc.action.get_wx_reply()
return entry.create_reply(ret_msg, message)
        # Customer service conversation
uuid, record_uuid = entry.get_uuid_from_openid(openid)
ret_msg = ''
cr, uid, context, db = request.cr, request.uid or openerp.SUPERUSER_ID, request.context, request.db
if not uuid:
rs = request.env['wx.user'].sudo().search( [('openid', '=', openid)] )
if not rs.exists():
info = client.wxclient.get_user_info(openid)
info['group_id'] = ''
wx_user = request.env['wx.user'].sudo().create(info)
else:
wx_user = rs[0]
anonymous_name = wx_user.nickname
channel = request.env.ref('oejia_wx.channel_wx')
channel_id = channel.id
session_info, ret_msg = request.env["im_livechat.channel"].create_mail_channel(channel_id, anonymous_name, content, record_uuid)
if session_info:
uuid = session_info['uuid']
entry.create_uuid_for_openid(openid, uuid)
if not record_uuid:
wx_user.update_last_uuid(uuid)
if uuid:
message_type = "message"
message_content = origin_content
request_uid = request.session.uid or openerp.SUPERUSER_ID
author_id = False # message_post accept 'False' author_id, but not 'None'
if request.session.uid:
author_id = request.env['res.users'].sudo().browse(request.session.uid).partner_id.id
mail_channel = request.env["mail.channel"].sudo(request_uid).search([('uuid', '=', uuid)], limit=1)
msg = mail_channel.sudo(request_uid).with_context(mail_create_nosubscribe=True).message_post(author_id=author_id, email_from=mail_channel.anonymous_name, body=message_content, message_type='comment', subtype='mail.mt_comment', content_subtype='plaintext',attachment_ids=attachment_ids)
if ret_msg:
return entry.create_reply(ret_msg, message)
robot.add_handler(input_handle, type='text')
robot.add_handler(input_handle, type='image')
robot.add_handler(input_handle, type='voice')
robot.add_handler(input_handle, type='location')
|
StarcoderdataPython
|
3222004
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# @Time : 2020/8/4 5:25 PM
# @Author : 司云中
# @File : seckill.py
# @Software: Pycharm
import uuid
from rest_framework.generics import GenericAPIView
class SecKillOperation(GenericAPIView):
"""秒杀活动"""
# serializer_class = SecKillSerializer
|
StarcoderdataPython
|
1781411
|
<gh_stars>0
import sys
import fileinput
import re
import pandas as pd
import numpy as np
import csv
Lookup_gene={}
for each_line_of_text in list(fileinput.FileInput('Homo_sapiens.GRCh37.75.gtf')):
Ensembl_name = re.findall(r'ENSG+[0-9]{11}',each_line_of_text)
HUGO_name = re.findall(r'gene_name\s"(.*?)"',each_line_of_text)
    if Ensembl_name and HUGO_name:  # both must be present before indexing [0] below
Lookup_gene.setdefault(Ensembl_name[0], HUGO_name[0])
fileinput.close()
index = 0
if (len(sys.argv) > 2):
df = pd.read_csv(sys.argv[2])
column_num = int(sys.argv[1][2:])-1
else:
df = pd.read_csv(sys.argv[1])
column_num = 0
for line in list(df.iloc[:,1]):
Ensembl = re.findall(r'ENSG+[0-9]{11}',line)
    if Ensembl:
try:
hugo_name = Lookup_gene[Ensembl[0]]
df.replace(df.iloc[index,column_num],hugo_name, inplace=True)
except KeyError as e:
df.replace(df.iloc[index,column_num],'unknown', inplace=True)
index += 1
fileinput.close()
print(df.to_csv(quoting=csv.QUOTE_NONNUMERIC,index=False, line_terminator='\n'))
|
StarcoderdataPython
|
130266
|
<filename>django_orm/postgresql/sql/aggregates.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from django.db.models.sql import aggregates
from django.db import models
class ArrayLength(aggregates.Aggregate):
sql_function = 'array_length'
sql_template = '%(function)s(%(field)s, 1)'
is_computed = True
def __init__(self, col, source=None, is_summary=False, **extra):
_sum = extra.pop('sum', False)
if _sum:
self.sql_template = "sum(%s)" % self.sql_template
super(ArrayLength, self).__init__(col, source, is_summary=True, **extra)
|
StarcoderdataPython
|
1611225
|
import unittest
from bolt.core.command import Command, RegexCommand, ParseCommand
from bolt.discord.events import MessageCreate
from bolt.discord.models.message import Message
class TestCommand(unittest.TestCase):
def dummycallback(self):
pass
def test_command_matches(self):
command = Command("test command", self.dummycallback)
self.assertTrue(command.matches("test command"))
def test_command_not_matches(self):
command = Command("test command", self.dummycallback)
self.assertFalse(command.matches("test"))
def test_command_args_kwargs(self):
command = Command("test command", self.dummycallback)
args, kwargs = command.parse("test command")
self.assertListEqual(args, [])
self.assertDictEqual(kwargs, {})
def test_change_trigger(self):
command = Command("test command", self.dummycallback, trigger="?!")
self.assertTrue(command.matches("?!test command"))
def test_invoke(self):
def dummycallback(event):
return True
event = MessageCreate()
event.message = Message.marshal({"id": "1", "channel_id": "2", "content": "!test"})
command = Command("test command", dummycallback, trigger="!")
self.assertTrue(command.invoke(event))
class TestRegexCommand(unittest.TestCase):
def dummycallback(self):
pass
def test_command_matches(self):
command = RegexCommand("^test command$", self.dummycallback)
self.assertTrue(command.matches("test command"))
def test_command_not_matches(self):
command = RegexCommand("^test command$", self.dummycallback)
self.assertFalse(command.matches(" test commanding"))
self.assertFalse(command.matches("test commanding"))
def test_command_args_kwargs(self):
command = RegexCommand("^test command ([0-9]+)$", self.dummycallback)
args, kwargs = command.parse("test command 1234")
self.assertListEqual(args, ['1234'])
self.assertDictEqual(kwargs, {})
def test_change_trigger(self):
command = RegexCommand("test command", self.dummycallback, trigger="?!")
self.assertTrue(command.matches("?!test command"))
class TestParseCommand(unittest.TestCase):
def dummycallback(self):
pass
def test_command_matches(self):
command = ParseCommand("test command", self.dummycallback)
self.assertTrue(command.matches("test command"))
def test_command_not_matches(self):
command = ParseCommand("test command", self.dummycallback)
self.assertFalse(command.matches(" test commanding"))
self.assertFalse(command.matches("test commanding"))
def test_command_kwargs(self):
command = ParseCommand("test command {fake_id:d}", self.dummycallback)
args, kwargs = command.parse("test command 1234")
self.assertListEqual(args, [])
self.assertDictEqual(kwargs, {"fake_id": 1234})
def test_command_args(self):
command = ParseCommand("test command {:d}", self.dummycallback)
args, kwargs = command.parse("test command 1234")
self.assertListEqual(args, [1234])
self.assertDictEqual(kwargs, {})
def test_command_args_kwargs(self):
command = ParseCommand("test command {:d} {fake_thing}", self.dummycallback)
args, kwargs = command.parse("test command 1234 mortimer")
self.assertListEqual(args, [1234])
self.assertDictEqual(kwargs, {"fake_thing": "mortimer"})
def test_change_trigger(self):
command = ParseCommand("test command", self.dummycallback, trigger="?!")
self.assertTrue(command.matches("?!test command"))
|
StarcoderdataPython
|
1619389
|
# K-means Clustering
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv("Mall_Customers.csv")
X = dataset.iloc[:,[3,4]].values
# Using the elbow method to find optimal number of clusters
from sklearn.cluster import KMeans
wcss = []
for i in range(1,11):
kmeans = KMeans(n_clusters = i, init="k-means++", max_iter = 300, n_init = 10, random_state = 0)
kmeans.fit(X)
wcss.append(kmeans.inertia_)
plt.figure("Use elbow method to find optimal number of clusters")
plt.plot(range(1,11), wcss)
plt.title("The Elbow Method")
plt.xlabel("Number of clusters")
plt.ylabel("wcss") # "wcss" = "Within Cluster Sum of Squares"
plt.show()
# Applying k-means to the dataset
# "n_cluster" parameter value decided by graph (elbow method)
kmeans = KMeans(n_clusters = 5, init="k-means++", max_iter = 300, n_init = 10, random_state = 0)
y_kmeans = kmeans.fit_predict(X)
# Visualizing the clusters
plt.figure("Visualizing the different clusters")
plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s = 100, c = 'red', label = "Careful")
plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s = 100, c = 'blue', label = "Standard")
plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s = 100, c = 'green', label = "Target")
plt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s = 100, c = 'cyan', label = "Careless")
plt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], s = 100, c = 'magenta', label = "Sensible")
plt.scatter(kmeans.cluster_centers_[:,0], kmeans.cluster_centers_[:,1], s = 300, c = 'yellow', label="Centroids")
plt.title("Cluster of clients")
plt.xlabel("Annual Income (k$)")
plt.ylabel("Spending Score(1-100)")
plt.legend()
plt.show()
|
StarcoderdataPython
|
3284628
|
class InstanceReferenceGeometry(GeometryBase,IDisposable,ISerializable):
""" InstanceReferenceGeometry(instanceDefinitionId: Guid,transform: Transform) """
def ConstructConstObject(self,*args):
""" ConstructConstObject(self: CommonObject,parentObject: object,subobject_index: int) """
pass
def Dispose(self):
""" Dispose(self: CommonObject,disposing: bool) """
pass
def NonConstOperation(self,*args):
""" NonConstOperation(self: CommonObject) """
pass
def OnSwitchToNonConst(self,*args):
""" OnSwitchToNonConst(self: GeometryBase) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,instanceDefinitionId,transform):
""" __new__(cls: type,instanceDefinitionId: Guid,transform: Transform) """
pass
ParentIdefId=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: ParentIdefId(self: InstanceReferenceGeometry) -> Guid
"""
Xform=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Xform(self: InstanceReferenceGeometry) -> Transform
"""
|
StarcoderdataPython
|
1604354
|
Nome = str(input('Enter your name: ')).strip()
print('Analyzing your name...')
print('Your name in uppercase is: ', Nome.upper())
print('Your name in lowercase is: ', Nome.lower())
print('Your name has {} letters'.format(len(Nome) - Nome.count(' ')))
print('Your first name is {} and it has {} letters'.format(Nome[:Nome.find(' ')], Nome.find(' ')))
|
StarcoderdataPython
|
4826555
|
import uuid
from pyramid import httpexceptions
from pyramid.settings import asbool
from pyramid.security import NO_PERMISSION_REQUIRED, Authenticated
from kinto.core import get_user_info as core_get_user_info
from kinto.core.errors import raise_invalid
from kinto.core.events import ACTIONS
from kinto.core.storage.exceptions import UnicityError
from kinto.core.utils import build_request, reapply_cors, hmac_digest, instance_uri, view_lookup
from kinto.authorization import RouteFactory
from kinto.views.buckets import Bucket
from kinto.views.collections import Collection
def create_bucket(request, bucket_id):
"""Create a bucket if it doesn't exists."""
bucket_put = request.method.lower() == "put" and request.path.endswith("buckets/default")
# Do nothing if current request will already create the bucket.
if bucket_put:
return
    # Do not attempt to create it multiple times per request (e.g. in batch).
already_created = request.bound_data.setdefault("buckets", {})
if bucket_id in already_created:
return
bucket_uri = instance_uri(request, "bucket", id=bucket_id)
bucket = resource_create_object(request=request, resource_cls=Bucket, uri=bucket_uri)
already_created[bucket_id] = bucket
def create_collection(request, bucket_id):
# Do nothing if current request does not involve a collection.
subpath = request.matchdict.get("subpath")
if not (subpath and subpath.rstrip("/").startswith("collections/")):
return
collection_id = subpath.split("/")[1]
collection_uri = instance_uri(request, "collection", bucket_id=bucket_id, id=collection_id)
    # Do not attempt to create it multiple times per request (e.g. in batch).
already_created = request.bound_data.setdefault("collections", {})
if collection_uri in already_created:
return
# Do nothing if current request will already create the collection.
collection_put = request.method.lower() == "put" and request.path.endswith(collection_id)
if collection_put:
return
collection = resource_create_object(
request=request, resource_cls=Collection, uri=collection_uri
)
already_created[collection_uri] = collection
def resource_create_object(request, resource_cls, uri):
"""Implicitly create a resource (or fail silently).
In the default bucket, the bucket and collection are implicitly
created. This helper creates one of those resources using a
simulated request and context that is appropriate for the
resource. Also runs create events as though the resource were
created in a subrequest.
If the resource already exists, do nothing.
"""
resource_name, matchdict = view_lookup(request, uri)
# Build a fake request, mainly used to populate the create events that
# will be triggered by the resource.
fakerequest = build_request(request, {"method": "PUT", "path": uri})
fakerequest.matchdict = matchdict
fakerequest.bound_data = request.bound_data
fakerequest.authn_type = request.authn_type
fakerequest.selected_userid = request.selected_userid
fakerequest.errors = request.errors
fakerequest.current_resource_name = resource_name
obj_id = matchdict["id"]
# Fake context, required to instantiate a resource.
context = RouteFactory(fakerequest)
context.resource_name = resource_name
resource = resource_cls(fakerequest, context)
# Check that provided id is valid for this resource.
if not resource.model.id_generator.match(obj_id):
error_details = {"location": "path", "description": "Invalid {} id".format(resource_name)}
raise_invalid(resource.request, **error_details)
data = {"id": obj_id}
try:
obj = resource.model.create_record(data)
except UnicityError as e:
# The record already exists; skip running events
return e.record
# Since the current request is not a resource (but a straight Service),
# we simulate a request on a resource.
# This will be used in the resource event payload.
resource.postprocess(obj, action=ACTIONS.CREATE)
return obj
def default_bucket(request):
if request.method.lower() == "options":
path = request.path.replace("default", "unknown")
subrequest = build_request(request, {"method": "OPTIONS", "path": path})
return request.invoke_subrequest(subrequest)
if Authenticated not in request.effective_principals:
# Pass through the forbidden_view_config
raise httpexceptions.HTTPForbidden()
settings = request.registry.settings
if asbool(settings["readonly"]):
raise httpexceptions.HTTPMethodNotAllowed()
bucket_id = request.default_bucket_id
# Implicit object creations.
# Make sure bucket exists
create_bucket(request, bucket_id)
# Make sure the collection exists
create_collection(request, bucket_id)
path = request.path.replace("/buckets/default", "/buckets/{}".format(bucket_id))
querystring = request.url[(request.url.index(request.path) + len(request.path)) :]
try:
# If 'id' is provided as 'default', replace with actual bucket id.
body = request.json
body["data"]["id"] = body["data"]["id"].replace("default", bucket_id)
except Exception:
body = request.body or {"data": {}}
subrequest = build_request(
request, {"method": request.method, "path": path + querystring, "body": body}
)
subrequest.bound_data = request.bound_data
try:
response = request.invoke_subrequest(subrequest)
except httpexceptions.HTTPException as error:
is_redirect = error.status_code < 400
if error.content_type == "application/json" or is_redirect:
response = reapply_cors(subrequest, error)
else:
# Ask the upper level to format the error.
raise error
return response
def default_bucket_id(request):
settings = request.registry.settings
secret = settings["userid_hmac_secret"]
# Build the user unguessable bucket_id UUID from its user_id
digest = hmac_digest(secret, request.prefixed_userid)
return str(uuid.UUID(digest[:32]))
def get_user_info(request):
user_info = {**core_get_user_info(request), "bucket": request.default_bucket_id}
return user_info
def includeme(config):
# Redirect default to the right endpoint
config.add_view(default_bucket, route_name="default_bucket", permission=NO_PERMISSION_REQUIRED)
config.add_view(
default_bucket, route_name="default_bucket_collection", permission=NO_PERMISSION_REQUIRED
)
config.add_route("default_bucket_collection", "/buckets/default/{subpath:.*}")
config.add_route("default_bucket", "/buckets/default")
# Provide helpers
config.add_request_method(default_bucket_id, reify=True)
# Override kinto.core default user info
config.add_request_method(get_user_info)
config.add_api_capability(
"default_bucket",
description="The default bucket is an alias for a personal"
" bucket where collections are created implicitly.",
url="https://kinto.readthedocs.io/en/latest/api/1.x/"
"buckets.html#personal-bucket-default",
)
|
StarcoderdataPython
|
1627275
|
epochs = 50
class_weight = {0: 1., 1: 1.}
batch_size = 8
|
StarcoderdataPython
|
3277116
|
<filename>migrations/versions/f017a3d88213_added_set_table.py<gh_stars>0
"""Added Set table
Revision ID: f017a3d88213
Revises: <PASSWORD>
Create Date: 2017-12-05 23:09:29.667000
"""
# revision identifiers, used by Alembic.
revision = 'f017a3<PASSWORD>'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('set',
sa.Column('id', sa.Text(), nullable=False),
sa.Column('author_id', sa.Integer(), nullable=False),
sa.Column('name', sa.Text(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('definition', sa.Text(), nullable=True),
sa.Column('parent', sa.Integer(), nullable=True),
sa.Column('permissions', sa.Integer(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id', 'author_id')
)
op.create_index(op.f('ix_set_permissions'), 'set', ['permissions'], unique=False)
op.create_index(op.f('ix_set_timestamp'), 'set', ['timestamp'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_set_timestamp'), table_name='set')
op.drop_index(op.f('ix_set_permissions'), table_name='set')
op.drop_table('set')
# ### end Alembic commands ###
|
StarcoderdataPython
|
1670661
|
<reponame>q0w/snug
import json
from datetime import datetime
import snug
BASE = 'https://api.github.com'
class repo(snug.Query[dict]):
"""a repository lookup by owner and name"""
def __init__(self, name, owner):
self.name, self.owner = name, owner
def __iter__(self):
request = snug.GET(BASE + f'/repos/{self.owner}/{self.name}')
return json.loads((yield request).content)
def star(self) -> snug.Query[bool]:
"""star this repo"""
req = snug.PUT(BASE + f'/user/starred/{self.owner}/{self.name}')
return (yield req).status_code == 204
@snug.related
class issue(snug.Query[dict]):
"""get an issue in this repo"""
def __init__(self, repo, number):
self.repo, self.number = repo, number
def __iter__(self):
request = snug.GET(
BASE +
f'repos/{self.repo.owner}/'
f'{self.repo.name}/issues/{self.number}')
return json.loads((yield request).content)
def comments(self, since: datetime) -> snug.Query[list]:
"""retrieve comments for this issue"""
request = snug.GET(
BASE +
f'repos/{self.issue.repo.owner}/{self.issue.repo.name}/'
f'issues/{self.issue.number}/comments',
params={'since': since.strftime('%Y-%m-%dT%H:%M:%SZ')})
return json.loads((yield request).content)
|
StarcoderdataPython
|
1714613
|
<gh_stars>10-100
### Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
from .base_options import BaseOptions
class BothOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
# for displays
self.parser.add_argument('--display_freq', type=int, default=100, help='frequency of showing training results on screen')
self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
self.parser.add_argument('--save_latest_freq', type=int, default=1000, help='frequency of saving the latest results')
self.parser.add_argument('--save_epoch_freq', type=int, default=50, help='frequency of saving checkpoints at the end of epochs')
self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
self.parser.add_argument('--debug', action='store_true', help='only do one epoch and displays at each iteration')
# for training
self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
self.parser.add_argument('--load_pretrain', type=str, default='', help='load the pretrained model from the specified location')
self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
self.parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate')
self.parser.add_argument('--niter_decay', type=int, default=0, help='# of iter to linearly decay learning rate to zero')
self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
# for discriminators
self.parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to use')
self.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')
self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
self.parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss')
self.parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss')
self.parser.add_argument('--no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss')
self.parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN')
self.parser.add_argument('--pool_size', type=int, default=0, help='the size of image buffer that stores previously generated images')
self.parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.')
self.parser.add_argument('--results_dir', type=str, default='results', help='saves results here.')
self.parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
self.parser.add_argument('--how_many', type=int, default=50, help='how many test images to run')
self.parser.add_argument('--cluster_path', type=str, default='features_clustered_010.npy', help='the path for clustered results of encoded features')
self.parser.add_argument('--use_encoded_image', action='store_true', help='if specified, encode the real image to get the feature map')
self.parser.add_argument("--export_onnx", type=str, help="export ONNX model to a given file")
self.parser.add_argument("--engine", type=str, help="run serialized TRT engine")
self.parser.add_argument("--onnx", type=str, help="run ONNX model via TRT")
# My stuff for launch.py to work
self.parser.add_argument("--isTrain", type=bool, help="Training Script or Not")
self.parser.add_argument("--mode", type=str, help="Train, Meta-Train, Finetune")
self.parser.add_argument("--k", type=int, help="Number of images to finetune on")
self.parser.add_argument("--T", type=int, help="Number of iterations to finetune on")
self.parser.add_argument("--train_dataset", type=str, help="Train Dataset specified with all txtfiles")
self.parser.add_argument("--test_dataset", type=str, help="Test Dataset specified with all txtfiles")
self.parser.add_argument("--model_checkpoints", type=str, help="Model_checkpoint directory ")
self.parser.add_argument('--model_list', nargs='+', help='Pass in visualizations that you want to complete')
self.parser.add_argument('--start_FT_vid', type=int, default= 1, help="Which video from 1-8 that we should start training from")
self.parser.add_argument('--meta_iter', type=int, help='Pass the number of meta-iterations')
self.parser.add_argument('--start_meta_iter', type=int, default = 0, help='Pass the number of meta-iterations')
self.parser.add_argument('--init_weights', type=bool, default=False, help="Determine whether or not to upload pretrained weights for meta-learning")
self.parser.add_argument('--save_meta_iter', type=int, default=25, help="Determine whether or not to upload pretrained weights for meta-learning")
self.parser.add_argument('--test_meta_iter', type=int, default=300, help="Determine whether or not to upload pretrained weights for meta-learning")
self.parser.add_argument('--epsilon', type=float, default=0.1, help = "Meta Learning Rate as detailed in Reptile paper")
self.parser.add_argument('--only_generator', type=bool, default=False, help="Only train the meta-generator")
self.parser.add_argument('--one_vid', type=int, default=0, help="Only train the meta-generator")
|
StarcoderdataPython
|
152964
|
"""
GitLab API:
https://docs.gitlab.com/ee/api/instance_level_ci_variables.html
https://docs.gitlab.com/ee/api/project_level_variables.html
https://docs.gitlab.com/ee/api/group_level_variables.html
"""
import re
import pytest
import responses
from gitlab.v4.objects import GroupVariable, ProjectVariable, Variable
key = "TEST_VARIABLE_1"
value = "TEST_1"
new_value = "TEST_2"
variable_content = {
"key": key,
"variable_type": "env_var",
"value": value,
"protected": False,
"masked": True,
}
variables_url = re.compile(
r"http://localhost/api/v4/(((groups|projects)/1)|(admin/ci))/variables"
)
variables_key_url = re.compile(
rf"http://localhost/api/v4/(((groups|projects)/1)|(admin/ci))/variables/{key}"
)
@pytest.fixture
def resp_list_variables():
with responses.RequestsMock() as rsps:
rsps.add(
method=responses.GET,
url=variables_url,
json=[variable_content],
content_type="application/json",
status=200,
)
yield rsps
@pytest.fixture
def resp_get_variable():
with responses.RequestsMock() as rsps:
rsps.add(
method=responses.GET,
url=variables_key_url,
json=variable_content,
content_type="application/json",
status=200,
)
yield rsps
@pytest.fixture
def resp_create_variable():
with responses.RequestsMock() as rsps:
rsps.add(
method=responses.POST,
url=variables_url,
json=variable_content,
content_type="application/json",
status=200,
)
yield rsps
@pytest.fixture
def resp_update_variable():
updated_content = dict(variable_content)
updated_content["value"] = new_value
with responses.RequestsMock() as rsps:
rsps.add(
method=responses.PUT,
url=variables_key_url,
json=updated_content,
content_type="application/json",
status=200,
)
yield rsps
@pytest.fixture
def resp_delete_variable(no_content):
with responses.RequestsMock() as rsps:
rsps.add(
method=responses.DELETE,
url=variables_key_url,
json=no_content,
content_type="application/json",
status=204,
)
yield rsps
def test_list_instance_variables(gl, resp_list_variables):
variables = gl.variables.list()
assert isinstance(variables, list)
assert isinstance(variables[0], Variable)
assert variables[0].value == value
def test_get_instance_variable(gl, resp_get_variable):
variable = gl.variables.get(key)
assert isinstance(variable, Variable)
assert variable.value == value
def test_create_instance_variable(gl, resp_create_variable):
variable = gl.variables.create({"key": key, "value": value})
assert isinstance(variable, Variable)
assert variable.value == value
def test_update_instance_variable(gl, resp_update_variable):
variable = gl.variables.get(key, lazy=True)
variable.value = new_value
variable.save()
assert variable.value == new_value
def test_delete_instance_variable(gl, resp_delete_variable):
variable = gl.variables.get(key, lazy=True)
variable.delete()
def test_list_project_variables(project, resp_list_variables):
variables = project.variables.list()
assert isinstance(variables, list)
assert isinstance(variables[0], ProjectVariable)
assert variables[0].value == value
def test_get_project_variable(project, resp_get_variable):
variable = project.variables.get(key)
assert isinstance(variable, ProjectVariable)
assert variable.value == value
def test_create_project_variable(project, resp_create_variable):
variable = project.variables.create({"key": key, "value": value})
assert isinstance(variable, ProjectVariable)
assert variable.value == value
def test_update_project_variable(project, resp_update_variable):
variable = project.variables.get(key, lazy=True)
variable.value = new_value
variable.save()
assert variable.value == new_value
def test_delete_project_variable(project, resp_delete_variable):
variable = project.variables.get(key, lazy=True)
variable.delete()
def test_list_group_variables(group, resp_list_variables):
variables = group.variables.list()
assert isinstance(variables, list)
assert isinstance(variables[0], GroupVariable)
assert variables[0].value == value
def test_get_group_variable(group, resp_get_variable):
variable = group.variables.get(key)
assert isinstance(variable, GroupVariable)
assert variable.value == value
def test_create_group_variable(group, resp_create_variable):
variable = group.variables.create({"key": key, "value": value})
assert isinstance(variable, GroupVariable)
assert variable.value == value
def test_update_group_variable(group, resp_update_variable):
variable = group.variables.get(key, lazy=True)
variable.value = new_value
variable.save()
assert variable.value == new_value
def test_delete_group_variable(group, resp_delete_variable):
variable = group.variables.get(key, lazy=True)
variable.delete()
|
StarcoderdataPython
|
115893
|
<gh_stars>0
# 7652413
import euler
N = 7
m = 0
d = '123456789'
for i in xrange(euler.product(xrange(1, N + 1))):
v = []
dd = list(d)
for j in xrange(N):
v.append(dd.pop(i % (N - j)))
i /= (N - j)
vv = int(''.join(v))
if euler.is_prime(vv) and vv > m:
m = vv
print m
|
StarcoderdataPython
|
1769513
|
from typing import Any, Tuple
import numba as nb
import numpy as np
from nptyping import NDArray
from .definitions import LatticeState, gathered_order_parameters
from .tensor_tools import SQRT2, SQRT6, SQRT16, dot10, ten6_to_mat
@nb.njit(nb.int32(nb.float32), cache=True)
def biaxial_ordering(lam: float) -> int:
if lam < (SQRT16 - 1e-3):
return 1
if lam > (SQRT16 + 1e-3):
return -1
if abs(lam - SQRT16) < 1e-3:
return 0
return -100
@nb.njit(nb.types.UniTuple(nb.float32, 3)(nb.float32[:], nb.float32[:], nb.float32), cache=True)
def _q0q2w(mt20: NDArray[(6,), np.float32],
mt22: NDArray[(6,), np.float32],
lam: np.float32
) -> Tuple[np.float32, np.float32, np.float32]:
m_q = mt20 + lam * SQRT2 * mt22
m_q = ten6_to_mat(m_q)
ev = np.linalg.eigvalsh(m_q)
i = np.argsort(ev**2)[::-1]
wn, wm, wl = ev[i]
nq0 = 1
nq2 = 1
o = biaxial_ordering(lam)
if o == 1 or o == 0:
nq0 = 1 / (SQRT6 / 3)
nq2 = 1 / SQRT2
if o == -1:
nq0 = 1 / (SQRT6 / 3 * 2)
nq2 = 1 / SQRT2
q0 = wn * nq0
q2 = (wl - wm) * nq2
w = (q0**3 - 3 * q0 * q2**2) / (q0**2 + q2**2)**(3 / 2)
return q0, q2, w
@nb.njit(nb.float32(nb.float32[:]), cache=True)
def _d322(mt32: NDArray[(10,), np.float32]) -> np.float32:
return np.sqrt(dot10(mt32, mt32))
@nb.jit(nopython=False, forceobj=True, parallel=True)
def calculate_order_parameters(state: LatticeState) -> NDArray[(Any,), Any]:
"""Calculate instantaneous order parameters after the `LatticeState` has been updated."""
avg = state.lattice_averages[0]
q0, q2, w = _q0q2w(avg['t20'], avg['t22'], float(state.parameters.lam))
energy = avg['energy']
p = avg['p']
d322 = _d322(avg['t32'])
return np.array([
(energy, q0, q2, w, p, d322)
], dtype=gathered_order_parameters)
|
StarcoderdataPython
|
1760351
|
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from functools import partial
import optax
from jax.experimental.maps import mesh
from jax.experimental import PartitionSpec
from jax.experimental.pjit import pjit
from clip_model import TextCLIP
import clip_jax
def cfg_encode_text(config, tokens):
clip = TextCLIP( # the same as orig, except context_length = 77 - 2 (special tokens)
embed_dim = 512,
context_length = 75,
vocab_size = 49408,
# can possibly vary
rotary_dims = config["rotary_dims"],
transformer_width = config["d_model"],
transformer_heads = config["n_heads"],
transformer_layers = config["layers"])
return clip.encode_text(tokens)
def pmap_batch(batch):
"""Splits the first axis of `arr` evenly across the number of devices."""
per_device_batch_size = batch.shape[0] // jax.device_count()
batch = batch[:per_device_batch_size * jax.device_count()] # trim the rest of the batch
return batch.reshape(jax.device_count(), per_device_batch_size, *batch.shape[1:])
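# Illustrative shapes (assuming jax.device_count() == 8): a (17, 75) batch is trimmed to
# 16 rows and reshaped to (8, 2, 75), i.e. (devices, per-device batch, ...).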
class ClipTrainer:
def __init__(self, config):
self.config = config
optimizer = config["optimizer"]
_, clip_target, _, _ = clip_jax.load('ViT-B/32', "cpu")
encode_text = partial(cfg_encode_text,config)
clip_init_fn = hk.transform(hk.experimental.optimize_rng_use(encode_text)).init
def init(key, xs):
params = clip_init_fn(key, tokens = xs)
opt_state = optimizer.init(params)
return {
"params": params,
"step": np.array(0),
"opt_state": opt_state
}
key = hk.PRNGSequence(42)
x = jax.random.randint(next(key), (jax.local_device_count(),75), 0, 49408)
clip_apply_fn = hk.without_apply_rng(hk.transform(encode_text)).apply
def train_loss(params, x, y):
return jnp.mean(jnp.square(clip_apply_fn(params, x) - clip_target(y)))
@partial(jax.pmap, axis_name='dp')
def eval_pmap(params, x, y):
loss = train_loss(params, x, y)
return jax.lax.pmean(loss, axis_name='dp')
@partial(jax.pmap, axis_name='dp')
def train_pmap(params, opt_state, x, y):
val_grad_fn = jax.value_and_grad(train_loss)
loss, grad = val_grad_fn(params, x, y)
grads = jax.lax.pmean(grad, axis_name='dp')
loss = jax.lax.pmean(loss, axis_name='dp')
            updates, opt_state = optimizer.update(grads, opt_state, params)  # use the cross-device averaged gradients
params = optax.apply_updates(params, updates)
return loss, params, opt_state
self.train_pmap = train_pmap
self.eval_pmap = eval_pmap
self.state = init(next(key), x)
self.eval_weights = None
param_count = hk.data_structures.tree_size(self.state['params'])
print(f"Total parameters: {param_count}")
def train(self, sample):
obs, target = map(pmap_batch, (sample["obs"], sample["target"]))
loss, params, opt_state = self.train_pmap(self.state["params"], self.state["opt_state"], obs, target)
self.state = {
"params": params,
"step": self.state["step"] + 1,
"opt_state": opt_state,
}
return np.array(loss).mean()
def eval(self, sample):
obs, target = map(pmap_batch, (sample["obs"], sample["target"]))
loss = self.eval_pmap(self.state["params"], obs, target)
return np.array(loss).mean()
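# Minimal usage sketch (hypothetical values; only the config keys below are read by ClipTrainer):
# import optax
# config = {"optimizer": optax.adam(1e-4), "rotary_dims": 32,
#           "d_model": 512, "n_heads": 8, "layers": 12}
# trainer = ClipTrainer(config)
# loss = trainer.train({"obs": token_batch, "target": clip_target_inputs})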
|
StarcoderdataPython
|
3384229
|
# Copyright (c) 2013 <NAME> <<EMAIL>>
# This file is part of SimpleGUITk - https://github.com/dholm/simpleguitk
# See the file 'COPYING' for copying permission.
from .frame import create_frame
from .image import get_height
from .image import get_width
from .image import load_image
from .input import KEY_MAP
from .timers import create_timer
|
StarcoderdataPython
|
142865
|
import pytest
from frameioclient import FrameioClient
@pytest.fixture
def frameioclient(token):
return FrameioClient("aaaabbbbccccddddeeee")
|
StarcoderdataPython
|
1676894
|
# food1 = 'rau muong'
# food2 = 'ca vien chien'
# food3 = 'pho'
# food4 = 'suon xao chua ngot'
# food5 = 'rau'
menu = ['rau muong', 'ca vien chien', 'pho', 'suong xao chua ngot', 'rau']
# separator
# print(*menu, sep=', ') #pythonic
#
# menu.append('bun cha')
#
# print(*menu, sep=', ')
# print(len(menu))
# print(menu[-1])
menu[0] = 'rau ma'
print(*menu, sep=', ')
|
StarcoderdataPython
|
3282836
|
<reponame>ozgurgunes/django-manifest<filename>tests/data_dicts.py
# -*- coding: utf-8 -*-
""" Manifest Data Dicts for Tests
"""
from django.utils.translation import ugettext_lazy as _
from manifest import defaults
LOGIN_FORM = {
"invalid": [
# No identification.
{
"data": {"identification": "", "password": "<PASSWORD>"},
"error": (
"identification",
[_("Please enter your username or email address.")],
),
},
# No password.
{
"data": {"identification": "john", "password": ""},
"error": ("password", [_("This field is required.")]),
},
# Wrong user.
{
"data": {"identification": "johnn", "password": "<PASSWORD>"},
"error": (
"__all__",
[_("Please check your identification and password.")],
),
},
# Wrong password.
{
"data": {"identification": "john", "password": "<PASSWORD>"},
"error": (
"__all__",
[_("Please check your identification and password.")],
),
},
],
"valid": [
{"identification": "john", "password": "<PASSWORD>"},
{"identification": "<EMAIL>", "password": "<PASSWORD>"},
],
}
LOGIN_SERIALIZER = {
"invalid": [
# No identification.
{
"data": {"identification": "", "password": "<PASSWORD>"},
"error": ("identification", [_("This field may not be blank.")]),
},
# No password.
{
"data": {"identification": "john", "password": ""},
"error": ("password", [_("This field may not be blank.")]),
},
# Wrong user.
{
"data": {"identification": "johnn", "password": "<PASSWORD>"},
"error": (
"non_field_errors",
[_("Please check your identification and password.")],
),
},
# Wrong password.
{
"data": {"identification": "john", "password": "<PASSWORD>"},
"error": (
"non_field_errors",
[_("Please check your identification and password.")],
),
},
],
"valid": [
{"identification": "john", "password": "<PASSWORD>"},
{"identification": "<EMAIL>", "password": "<PASSWORD>"},
],
}
REGISTER_FORM = {
"invalid": [
# Non-alphanumeric username.
{
"data": {
"username": "foo@bar",
"email": "<EMAIL>",
"password": "<PASSWORD>",
"password2": "<PASSWORD>",
"tos": "on",
},
"error": (
"username",
[
_(
"Username must contain only letters, "
"numbers and underscores."
)
],
),
},
# Passwords are not same.
{
"data": {
"username": "foo2bar",
"email": "<EMAIL>",
"password1": "<PASSWORD>",
"password2": "<PASSWORD>",
"tos": "on",
},
"error": (
"password2",
[_("The two password fields didn’t match.")],
),
},
# Already taken username.
{
"data": {
"username": "john",
"email": "<EMAIL>",
"password1": "<PASSWORD>",
"password2": "<PASSWORD>",
"tos": "on",
},
"error": (
"username",
[_("A user with that username already exists.")],
),
},
# Already taken email.
{
"data": {
"username": "johndoe",
"email": "<EMAIL>",
"password": "<PASSWORD>",
"password2": "<PASSWORD>",
"tos": "on",
},
"error": (
"email",
[
_(
"This email address is already in use. "
"Please supply a different email."
)
],
),
},
# Forbidden username.
{
"data": {
"username": "test",
"email": "<EMAIL>",
"password": "<PASSWORD>",
"password2": "<PASSWORD>",
"tos": "on",
},
"error": ("username", [_("This username is not allowed.")]),
},
],
"valid": [
{
"username": "alice",
"email": "<EMAIL>",
"password1": "<PASSWORD>",
"password2": "<PASSWORD>",
"tos": "on",
}
],
}
REGISTER_SERIALIZER = {
"invalid": [
# Non-alphanumeric username.
{
"data": {
"username": "foo@bar",
"email": "<EMAIL>",
"password": "<PASSWORD>",
"password2": "<PASSWORD>",
"tos": "on",
},
"error": (
"username",
[
_(
"Username must contain only letters, "
"numbers and underscores."
)
],
),
},
# Passwords are not same.
{
"data": {
"username": "foo2bar",
"email": "<EMAIL>",
"password1": "<PASSWORD>",
"password2": "<PASSWORD>",
"tos": "on",
},
"error": (
"password1",
[_("The two password fields didn’t match.")],
),
},
# Already taken username.
{
"data": {
"username": "john",
"email": "<EMAIL>",
"password1": "<PASSWORD>",
"password2": "<PASSWORD>",
"tos": "on",
},
"error": (
"username",
[_("A user with that username already exists.")],
),
},
# Already taken email.
{
"data": {
"username": "johndoe",
"email": "<EMAIL>",
"password": "<PASSWORD>",
"password2": "<PASSWORD>",
"tos": "on",
},
"error": (
"email",
[
_(
"This email address is already in use. "
"Please supply a different email."
)
],
),
},
# Forbidden username.
{
"data": {
"username": "test",
"email": "<EMAIL>",
"password": "<PASSWORD>",
"password2": "<PASSWORD>",
"tos": "on",
},
"error": ("username", [_("This username is not allowed.")]),
},
],
"valid": [
{
"username": "foobar",
"email": "<EMAIL>",
"password1": "<PASSWORD>",
"password2": "<PASSWORD>",
"tos": "on",
}
],
}
PROFILE_UPDATE_FORM = {
"invalid": [
# Invalid name.
{
"data": {
"first_name": "",
"last_name": "",
"gender": "M",
"birth_date": "1970-01-01",
},
"error": ("first_name", [_("This field is required.")]),
},
# Invalid gender.
{
"data": {
"first_name": "John",
"last_name": "Smith",
"gender": "",
"birth_date": "1970-01-01",
},
"error": ("gender", [_("This field is required.")]),
},
# Invalid birth date.
{
"data": {
"first_name": "John",
"last_name": "Smith",
"gender": "M",
"birth_date": "",
},
"error": ("birth_date", [_("This field is required.")]),
},
],
"valid": [
{
"first_name": "John",
"last_name": "Smith",
"gender": "M",
"birth_date": "1970-01-01",
},
{
"first_name": "Jane",
"last_name": "Smith",
"gender": "F",
"birth_date": "1970-01-01",
},
],
}
PROFILE_UPDATE_SERIALIZER = {
"invalid": [
# Invalid name.
{
"data": {
"first_name": "",
"last_name": "",
"gender": "M",
"birth_date": "01/01/1970",
},
"error": ("first_name", [_("This field may not be blank.")]),
},
# Invalid gender.
{
"data": {
"first_name": "John",
"last_name": "Smith",
"gender": "",
"birth_date": "01/01/1970",
},
"error": ("gender", [_('"" is not a valid choice.')]),
},
],
"valid": [
{
"first_name": "John",
"last_name": "Smith",
"gender": "M",
"birth_date": "01/01/1970",
},
{
"first_name": "Jane",
"last_name": "Smith",
"gender": "F",
"birth_date": "01/01/1970",
},
],
}
EMAIL_CHANGE_FORM = {
"invalid": [
# No change in e-mail address.
{
"data": {"email": "<EMAIL>"},
"error": (
"email",
[_("You're already known under this email address.")],
),
},
# An e-mail address used by another user.
{
"data": {"email": "<EMAIL>"},
"error": (
"email",
[
_(
"This email address is already in use. "
"Please supply a different email."
)
],
),
},
],
"valid": [{"email": "<EMAIL>"}],
}
EMAIL_CHANGE_SERIALIZER = {
"invalid": [
# No change in e-mail address.
{
"data": {"email": "<EMAIL>"},
"error": (
"email",
[_("You're already known under this email address.")],
),
},
# An e-mail address used by another.
{
"data": {"email": "<EMAIL>"},
"error": (
"email",
[
_(
"This email address is already in use. "
"Please supply a different email."
)
],
),
},
],
"valid": [{"email": "<EMAIL>"}],
}
REGION_UPDATE_FORM = {
"invalid": [
# Invalid timezone.
{
"data": {"timezone": "test1", "locale": "tr"},
"error": (
"timezone",
[
_(
"Select a valid choice. "
"test1 is not one of the available choices."
)
],
),
},
# Invalid locale.
{
"data": {"timezone": "Europe/Istanbul", "locale": "test2"},
"error": (
"locale",
[
_(
"Select a valid choice. "
"test2 is not one of the available choices."
)
],
),
},
],
"valid": [
{"timezone": "Europe/Istanbul", "locale": "tr"},
{"timezone": "UTC", "locale": "en"},
],
}
REGION_UPDATE_SERIALIZER = {
"invalid": [
# Invalid timezone.
{
"data": {"timezone": "test1", "locale": "tr"},
"error": ("timezone", [_('"test1" is not a valid choice.')]),
},
# Invalid locale.
{
"data": {"timezone": "Europe/Istanbul", "locale": "test2"},
"error": ("locale", [_('"test2" is not a valid choice.')]),
},
],
"valid": [
{"timezone": "Europe/Istanbul", "locale": "tr"},
{"timezone": "UTC", "locale": "en"},
],
}
PASSWORD_RESET_SERIALIZER = {
"invalid": [
# No email.
{
"data": {"email": ""},
"error": ("email", [_("This field may not be blank.")]),
},
# Invalid email.
{
"data": {"email": "test.com"},
"error": ("email", [_("Enter a valid email address.")]),
},
],
"valid": [{"email": "<EMAIL>"}],
}
PASSWORD_CHANGE_FORM = {
"invalid": [
# Wrong old password.
{
"data": {
"old_password": "<PASSWORD>",
"new_password1": "<PASSWORD>",
"new_password2": "<PASSWORD>",
},
"error": (
"old_password",
[
_(
"Your old password was entered incorrectly. "
"Please enter it again."
)
],
),
},
# Invalid email.
{
"data": {
"old_password": "<PASSWORD>",
"new_password1": "<PASSWORD>",
"new_password2": "<PASSWORD>",
},
"error": (
"new_password2",
[_("The two password fields didn’t match.")],
),
},
],
"valid": [
{
"old_password": "<PASSWORD>",
"new_password1": "<PASSWORD>",
"new_password2": "<PASSWORD>",
}
],
}
PASSWORD_CHANGE_SERIALIZER = {
"invalid": [
# Wrong old password.
{
"data": {
"old_password": "<PASSWORD>",
"new_password1": "<PASSWORD>",
"new_password2": "<PASSWORD>",
},
"error": (
"old_password",
[
_(
"Your old password was entered incorrectly. "
"Please enter it again."
)
],
),
},
# Invalid email.
{
"data": {
"old_password": "<PASSWORD>",
"new_password1": "<PASSWORD>",
"new_password2": "<PASSWORD>",
},
"error": (
"new_password2",
[_("The two password fields didn’t match.")],
),
},
],
"valid": [
{
"old_password": "<PASSWORD>",
"new_password1": "<PASSWORD>",
"new_password2": "<PASSWORD>",
}
],
}
def picture_upload_form(self):
return {
"invalid": [
{
"data": {"picture": None},
"error": ("picture", [_("This field is required.")]),
},
{
"data": {"picture": object},
"error": (
"picture",
[
_(
"No file was submitted. "
"Check the encoding type on the form."
)
],
),
},
],
"invalid_file_size": {
"data": {"picture": self.raw_image_file},
"error": ("picture", [_("Image size is too big.")]),
},
"invalid_file_type": {
"data": {
"picture": self.get_raw_file(
self.create_image(".tiff", "TIFF")
)
},
"error": (
"picture",
[_("%s only." % defaults.MANIFEST_PICTURE_FORMATS)],
),
},
"invalid_file_extension": {
"data": {
"picture": self.get_raw_file(self.create_image(".svg", "TIFF"))
},
"error": ("picture", [_("File extension “svg” is not allowed.")]),
},
"valid": [{"picture": self.raw_image_file}],
}
def picture_upload_serializer(self):
return {
"invalid": [
{
"data": {"picture": None},
"error": ("picture", [_("This field may not be null.")]),
},
{
"data": {"picture": object},
"error": (
"picture",
[
_(
"The submitted data was not a file. "
"Check the encoding type on the form."
)
],
),
},
],
"invalid_file_size": {
"data": {"picture": self.image_file},
"error": (
"picture",
[
_(
"The submitted data was not a file. "
"Check the encoding type on the form."
)
],
),
},
"invalid_file_type": {
"data": {
"picture": self.get_raw_file(
self.create_image(".tiff", "TIFF")
)
},
"error": (
"picture",
[_("%s only." % defaults.MANIFEST_PICTURE_FORMATS)],
),
},
"invalid_file_extension": {
"data": {
"picture": self.get_raw_file(self.create_image(".svg", "TIFF"))
},
"error": ("picture", [_("File extension “svg” is not allowed.")]),
},
"valid": [{"picture": self.raw_image_file}],
}
|
StarcoderdataPython
|
3310900
|
<reponame>kdmoreira/python_solutions<gh_stars>0
def sync(schedule_list):
"""This function returns the union of a set of schedules."""
sync_schedule = set()
for schedule in schedule_list:
sync_schedule = sync_schedule | schedule
return sync_schedule
schedules = [{'1234', '2345', '3456'}, {'4567', '5678', '6789'}, {'7890', '890'}]
union = sync(schedules)
print(union)
|
StarcoderdataPython
|
1618166
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains Plot Twist proxy mesh validator implementation
"""
from __future__ import print_function, division, absolute_import
__author__ = "<NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import pyblish.api
import tpDcc
import artellapipe
class SelectVerticesWithoutVertexColors(pyblish.api.Action):
label = 'Select Vertices without Vertex Colors'
on = 'failed'
def process(self, context, plugin):
assert tpDcc.is_maya(), 'Select Vertices without Vertex Color Action is only available in Maya!'
vertices_without_vertex_colors = context.data.get('vertices_without_vertex_colors', None)
assert vertices_without_vertex_colors, 'No vertices without vertex colors to select'
vertices_to_select = list()
for shape_node, vertices_ids in vertices_without_vertex_colors.items():
for vertex_id in vertices_ids:
vertices_to_select.append('{}.vtx[{}]'.format(shape_node, vertex_id))
assert vertices_to_select, 'No vertices to select'
tpDcc.Dcc.select_object(vertices_to_select)
class ValidatePlotTwistProxyMesh(pyblish.api.ContextPlugin):
"""
Checks if modeling file has a valid proxy mesh
"""
label = 'General - Check Proxy'
order = pyblish.api.ValidatorOrder
hosts = ['maya']
families = ['proxy']
optional = False
actions = [SelectVerticesWithoutVertexColors]
def process(self, context):
assert tpDcc.is_maya(), 'Validate Proxy Mesh is only available in Maya!'
from tpDcc.dccs.maya.core import node, api
root_group_name = artellapipe.NamesMgr().solve_name('root_group')
proxy_group_name = artellapipe.NamesMgr().solve_name('proxy_group')
geo_group_name = artellapipe.NamesMgr().solve_name('geo_group')
proxy_geo = artellapipe.NamesMgr().solve_name('proxy_geo')
proxy_geo_parent = '{}|{}|{}'.format(root_group_name, proxy_group_name, geo_group_name)
assert proxy_geo and tpDcc.Dcc.object_exists(
proxy_geo), 'Proxy geo "{}" does not exist in current scene!'.format(proxy_geo)
assert proxy_geo_parent and tpDcc.Dcc.object_exists(
            proxy_geo_parent), 'Proxy geo parent "{}" does not exist in current scene!'.format(proxy_geo_parent)
proxy_prefix = proxy_geo.split('_')[0]
proxy_geos = tpDcc.Dcc.list_nodes('{}_*'.format(proxy_prefix), node_type='transform') or list()
assert len(proxy_geos) == 1, 'Invalid number ({}) of proxy geometries found in current scene: {}'.format(
len(proxy_geos), proxy_geos)
proxy_geo = proxy_geos[0]
proxy_geo_shapes = tpDcc.Dcc.list_shapes(proxy_geo)
        assert proxy_geo_shapes, 'No shapes found in proxy geo geometry!'
# We check that all vertex colors have
vertices_without_vertex_colors = dict()
for proxy_shape in proxy_geo_shapes:
proxy_shape_node = node.get_mobject(proxy_shape)
proxy_shape_vtx_it = api.IterateVertices(proxy_shape_node)
proxy_shape_vertex_colors = proxy_shape_vtx_it.get_vertex_colors(skip_vertices_without_vertex_colors=False)
for vtx_id, vtx_color in proxy_shape_vertex_colors.items():
if vtx_color:
continue
if proxy_shape not in vertices_without_vertex_colors:
vertices_without_vertex_colors[proxy_shape] = list()
vertices_without_vertex_colors[proxy_shape].append(vtx_id)
if vertices_without_vertex_colors:
context.data['vertices_without_vertex_colors'] = vertices_without_vertex_colors
assert not vertices_without_vertex_colors, 'Some vertices of the proxy shapes have no vertex color ' \
'applied to them: {}!'.format(vertices_without_vertex_colors)
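# Hypothetical usage sketch (not part of the original file): registers the validator with
# pyblish so it runs during publishing; assumes the standard pyblish registration API.
def _register_plot_twist_validators():
    pyblish.api.register_plugin(ValidatePlotTwistProxyMesh)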
|
StarcoderdataPython
|
1771916
|
import os
from typing import Dict, List, Tuple
import numpy as np
import tensorflow as tf
from model.conll_dataset import CoNLLDataset
from .data_utils import get_chunks, pad_words, pad_chars
from .general_utils import Progbar
class NERModel:
"""Specialized class of Model for NER"""
def __init__(self, config):
self.config = config
self.logger = config.logger
"""Define placeholders = entries to computational graph"""
batch_size, sentence_length, word_length = None, None, None
self.word_ids = tf.placeholder(tf.int32, shape=[batch_size, sentence_length], name="word_ids")
self.sequence_lengths = tf.placeholder(tf.int32, shape=[batch_size], name="sequence_lengths")
self.char_ids = tf.placeholder(tf.int32, shape=[batch_size, sentence_length, word_length], name="char_ids")
self.word_lengths = tf.placeholder(tf.int32, shape=[batch_size, sentence_length], name="word_lengths")
self.labels = tf.placeholder(tf.int32, shape=[batch_size, sentence_length], name="labels")
# hyper parameters
self.dropout = tf.placeholder(dtype=tf.float32, shape=[], name="dropout")
self.lr = tf.placeholder(dtype=tf.float32, shape=[], name="lr")
"""
Defines self.word_embeddings
If self.config.embeddings is not None and is a np array initialized
with pre-trained word vectors, the word embeddings is just a look-up
and we don't train the vectors. Otherwise, a random matrix with
the correct shape is initialized.
"""
with tf.variable_scope("words"):
if self.config.embeddings is None:
self.logger.info("WARNING: randomly initializing word vectors")
_word_embeddings = tf.get_variable(
name="_word_embeddings",
dtype=tf.float32,
shape=[len(self.config.vocab_words), self.config.dim_word])
else:
_word_embeddings = tf.Variable(
initial_value=self.config.embeddings,
trainable=self.config.train_embeddings,
name="_word_embeddings",
dtype=tf.float32)
word_embeddings = tf.nn.embedding_lookup(params=_word_embeddings, ids=self.word_ids,
name="word_embeddings")
with tf.variable_scope("chars"):
if self.config.use_chars:
# get char embeddings matrix
_char_embeddings = tf.get_variable(
name="_char_embeddings",
dtype=tf.float32,
shape=[len(self.config.vocab_chars), self.config.dim_char])
char_embeddings = tf.nn.embedding_lookup(params=_char_embeddings, ids=self.char_ids,
name="char_embeddings")
# put the time dimension on axis=1
# bi lstm on chars
batch_size, sentence_length, word_length, char_dim = shapes(char_embeddings)
_outputs, _output_states = tf.nn.bidirectional_dynamic_rnn(
cell_fw=tf.contrib.rnn.LSTMCell(num_units=self.config.hidden_size_char, state_is_tuple=True),
cell_bw=tf.contrib.rnn.LSTMCell(num_units=self.config.hidden_size_char, state_is_tuple=True),
inputs=tf.reshape(char_embeddings, shape=[batch_size * sentence_length, word_length, self.config.dim_char]),
sequence_length=tf.reshape(self.word_lengths, shape=[batch_size * sentence_length]),
dtype=tf.float32)
# read and concat output
_output_state_fw, _output_state_bw = _output_states
_, output_fw = _output_state_fw
_, output_bw = _output_state_bw
# shape = (batch size, max sentence length, char hidden size)
output = tf.reshape(tensor=tf.concat([output_fw, output_bw], axis=-1),
shape=[batch_size, sentence_length, 2 * self.config.hidden_size_char])
word_embeddings = tf.concat([word_embeddings, output], axis=-1)
word_embeddings = tf.nn.dropout(word_embeddings, self.dropout)
"""
Defines self.logits
For each word in each sentence of the batch, it corresponds to a vector
of scores, of dimension equal to the number of tags.
"""
with tf.variable_scope("bi-lstm"):
(output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
cell_fw=tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm),
cell_bw=tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm),
inputs=word_embeddings,
sequence_length=self.sequence_lengths,
dtype=tf.float32)
output = tf.nn.dropout(x=tf.concat([output_fw, output_bw], axis=-1), keep_prob=self.dropout)
with tf.variable_scope("proj"):
pred = tf.matmul(a=tf.reshape(output, [-1, 2 * self.config.hidden_size_lstm]),
b=tf.get_variable("W", dtype=tf.float32, shape=[2 * self.config.hidden_size_lstm,
len(self.config.vocab_tags)])) \
+ tf.get_variable("b", shape=[len(self.config.vocab_tags)], dtype=tf.float32,
initializer=tf.zeros_initializer())
nsteps = tf.shape(output)[1]
self.logits = tf.reshape(pred, [-1, nsteps, len(self.config.vocab_tags)])
"""
Defines self.labels_pred
This op is defined only in the case where we don't use a CRF since in
that case we can make the prediction "in the graph" (thanks to tf
    functions in other words). With the CRF, as the inference is coded
    in python and not in pure tensorflow, we have to make the prediction
outside the graph.
"""
if not self.config.use_crf:
self.labels_pred = tf.cast(tf.argmax(self.logits, axis=-1), tf.int32)
"""Defines the loss"""
if self.config.use_crf:
log_likelihood, self.trans_params = tf.contrib.crf.crf_log_likelihood(self.logits, self.labels, self.sequence_lengths)
self.loss = tf.reduce_mean(-log_likelihood)
else:
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.labels)
self.loss = tf.reduce_mean(tf.boolean_mask(tensor=losses, mask=tf.sequence_mask(self.sequence_lengths)))
# for tensorboard
tf.summary.scalar("loss", self.loss)
"""Defines self.train_op that performs an update on a batch"""
with tf.variable_scope("train_step"):
_lr_m = self.config.lr_method.lower() # lower to make sure
            if _lr_m == 'adam':  # adam optimizer
optimizer = tf.train.AdamOptimizer(self.lr)
elif _lr_m == 'adagrad':
optimizer = tf.train.AdagradOptimizer(self.lr)
elif _lr_m == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(self.lr)
elif _lr_m == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(self.lr)
else:
raise NotImplementedError("Unknown method {}".format(_lr_m))
if self.config.clip > 0: # gradient clipping if clip is positive
grads, vs = zip(*optimizer.compute_gradients(self.loss))
grads, gnorm = tf.clip_by_global_norm(grads, self.config.clip)
self.train_op = optimizer.apply_gradients(zip(grads, vs))
else:
self.train_op = optimizer.minimize(self.loss)
"""Defines self.sess and initialize the variables"""
self.logger.info("Initializing tf session")
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver()
def train(self, train: CoNLLDataset, dev: CoNLLDataset) -> None:
"""Performs training with early stopping and lr exponential decay
Args:
train: dataset that yields tuple of (sentences, tags)
dev: dataset
"""
best_score = 0
nepoch_no_imprv = 0 # for early stopping
self.add_summary() # tensorboard
for epoch in range(self.config.nepochs):
self.logger.info("Epoch {:} out of {:}".format(epoch + 1, self.config.nepochs))
score = self.run_epoch(train, dev, epoch)
self.config.lr *= self.config.lr_decay # decay learning rate
# early stopping and saving best parameters
if score >= best_score:
nepoch_no_imprv = 0
best_score = score
self.save_session()
self.logger.info("- new best score!")
else:
nepoch_no_imprv += 1
if nepoch_no_imprv >= self.config.nepoch_no_imprv:
self.logger.info("- early stopping {} epochs without improvement".format(nepoch_no_imprv))
break
    def run_epoch(self, train: CoNLLDataset, dev: CoNLLDataset, epoch: int) -> float:
"""Performs one complete pass over the train set and evaluate on dev
Args:
train: dataset that yields tuple of sentences, tags
dev: dataset
epoch: (int) index of the current epoch
Returns:
f1: (python float), score to select model on, higher is better
"""
# progbar stuff for logging
batch_size = self.config.batch_size
nbatches = (len(train) + batch_size - 1) // batch_size
prog = Progbar(target=nbatches)
# iterate over dataset
for i, (words, labels) in enumerate(train.get_minibatches(batch_size)):
fd, _ = self.get_feed_dict(words, labels, self.config.lr, self.config.dropout)
_, train_loss, summary = self.sess.run([self.train_op, self.loss, self.merged], feed_dict=fd)
prog.update(i + 1, [("train loss", train_loss)])
# tensorboard
if i % 10 == 0:
self.file_writer.add_summary(summary, epoch * nbatches + i)
metrics = self.run_evaluate(dev)
msg = " - ".join(["{} {:04.2f}".format(k, v) for k, v in metrics.items()])
self.logger.info(msg)
return metrics["f1"]
def evaluate(self, test: CoNLLDataset) -> None:
"""Evaluate model on test set
Args:
test: instance of class Dataset
"""
self.logger.info("Testing model over test set")
metrics = self.run_evaluate(test)
self.logger.info(msg=" - ".join(["{} {:04.2f}".format(k, v) for k, v in metrics.items()]))
    def run_evaluate(self, test: CoNLLDataset) -> Dict[str, float]:
"""Evaluates performance on test set
Args:
test: dataset that yields tuple of (sentences, tags)
Returns:
metrics: (dict) metrics["acc"] = 98.4, ...
"""
accs = []
correct_preds, total_correct, total_preds = 0., 0., 0.
for words, labels in test.get_minibatches(self.config.batch_size):
labels_pred, sequence_lengths = self.predict_batch(words)
for lab, lab_pred, length in zip(labels, labels_pred, sequence_lengths):
lab = lab[:length]
lab_pred = lab_pred[:length]
accs += [a == b for (a, b) in zip(lab, lab_pred)]
lab_chunks = set(get_chunks(lab, self.config.vocab_tags))
lab_pred_chunks = set(get_chunks(lab_pred, self.config.vocab_tags))
correct_preds += len(lab_chunks & lab_pred_chunks)
total_preds += len(lab_pred_chunks)
total_correct += len(lab_chunks)
p = correct_preds / total_preds if correct_preds > 0 else 0
r = correct_preds / total_correct if correct_preds > 0 else 0
f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0
acc = np.mean(accs)
return {"acc": 100 * acc, "f1": 100 * f1}
def predict(self, words_raw: List[str]) -> List[str]:
"""Returns list of tags
Args:
words_raw: list of words (string), just one sentence (no batch)
Returns:
preds: list of tags (string), one for each word in the sentence
"""
words = [self.config.processing_word(w) for w in words_raw]
if type(words[0]) == tuple:
words = zip(*words)
idx_to_tag = {idx: tag for tag, idx in self.config.vocab_tags.items()}
pred_ids, _ = self.predict_batch([words])
preds = [idx_to_tag[idx] for idx in list(pred_ids[0])]
return preds
def predict_batch(self, sentences: List[List[int]]) -> Tuple[List[List[int]], int]:
"""
Args:
sentences: list of sentences
Returns:
labels_pred: list of labels for each sentence
sequence_length
"""
fd, sequence_lengths = self.get_feed_dict(sentences, dropout=1.0)
if self.config.use_crf:
# get tag scores and transition params of CRF
viterbi_sequences = []
logits, trans_params = self.sess.run([self.logits, self.trans_params], feed_dict=fd)
# iterate over the sentences because no batching in vitervi_decode
for logit, sequence_length in zip(logits, sequence_lengths):
logit = logit[:sequence_length] # keep only the valid steps
viterbi_seq, viterbi_score = tf.contrib.crf.viterbi_decode(logit, trans_params)
viterbi_sequences += [viterbi_seq]
return viterbi_sequences, sequence_lengths
else:
labels_pred = self.sess.run(self.labels_pred, feed_dict=fd)
return labels_pred, sequence_lengths
def get_feed_dict(self,
sentences: List[List[int]],
labels: List[List[int]] = None,
lr: float = None, dropout: float = None) -> Tuple[Dict, int]:
"""Given some data, pad it and build a feed dictionary
Args:
sentences: list of sentences. A sentence is a list of ids of a list of
words. A word is a list of ids
labels: list of ids
lr: (float) learning rate
dropout: (float) keep prob
Returns:
dict {placeholder: value}
"""
# perform padding of the given data
if self.config.use_chars:
char_ids, word_ids = zip(*sentences)
char_ids, word_lengths = pad_chars(char_ids)
word_ids, sequence_lengths = pad_words(word_ids)
else:
word_ids, sequence_lengths = pad_words(sentences)
# build feed dictionary
feed = {}
feed[self.word_ids] = word_ids
feed[self.sequence_lengths] = sequence_lengths
if self.config.use_chars:
feed[self.char_ids] = char_ids
feed[self.word_lengths] = word_lengths
if labels is not None:
labels, _ = pad_words(labels)
feed[self.labels] = labels
if lr is not None:
feed[self.lr] = lr
if dropout is not None:
feed[self.dropout] = dropout
return feed, sequence_lengths
def reinitialize_weights(self, scope_name):
"""Reinitializes the weights of a given layer"""
variables = tf.contrib.framework.get_variables(scope_name)
init = tf.variables_initializer(variables)
self.sess.run(init)
def restore_session(self, dir_model):
"""Reload weights into session
Args:
sess: tf.Session()
dir_model: dir with weights
"""
self.logger.info("Reloading the latest trained model...")
self.saver.restore(self.sess, dir_model)
def save_session(self):
"""Saves session = weights"""
if not os.path.exists(self.config.dir_model):
os.makedirs(self.config.dir_model)
self.saver.save(self.sess, self.config.dir_model)
def close_session(self):
"""Closes the session"""
self.sess.close()
def add_summary(self):
"""Defines variables for Tensorboard
Args:
dir_output: (string) where the results are written
"""
self.merged = tf.summary.merge_all()
self.file_writer = tf.summary.FileWriter(self.config.dir_output, self.sess.graph)
def shapes(tensor):
shape = tf.shape(tensor)
return [shape[i] for i in range(shape.get_shape()[0])]
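# Hypothetical usage sketch (not part of the original file): shows how the `shapes`
# helper above unpacks dynamic dimensions of a TF1 placeholder. The placeholder name
# and embedding size are assumptions made only for illustration.
def _shapes_usage_example():
    example_input = tf.placeholder(tf.float32, shape=[None, None, 300], name="example_input")
    batch_size, num_steps, emb_dim = shapes(example_input)  # each entry is a scalar tensor
    return batch_size, num_steps, emb_dim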
|
StarcoderdataPython
|
3390274
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# OpenData DWD FX Radar processing tool
# Author: <NAME>
# v0: First test version to read in FX binary data and create first static plot
# v1: Included automatic download of latest image and further improvements
# v2: Split program into single functions for better code structure
# --> not yet finished, has to be done
# Load necessary python modules
import numpy as np
from datetime import datetime
from sys import argv
# Define user inputs
temp_folder = '/work/bm0834/u233139/DWDRadar/temp/' # Static link for debug options
#infile_fx = '/work/bm0834/u233139/DWDRadar/FXLATEST_000_MF002' # Static link for debug options
#infile_fx = temp_folder+"FXLATEST_000_MF002" # Latest image update option
infile_fx = argv[1] # Option for argument input (argv imported from sys above)
plot_path = argv[2] # Option for argument input
#plot_path = '/work/bm0834/u233139/DWDRadar/plots/' # Static link for debug options
# Define DWD FX Radar download function
def download_dwd_radar(latest):
"This function downloads latest DWD radar file."
## Download latest radar image to temporary directory
# Check if temporary folder exists otherwise create one
import os
import urllib
if not os.path.exists(temp_folder):
os.makedirs(temp_folder)
# Download file
if latest == 1:
latest_fx_file = urllib.URLopener()
latest_fx_file.retrieve("https://opendata.dwd.de/weather/radar/composit/fx/FXLATEST_000_MF002", temp_folder+"FXLATEST_000_MF002")
else:
print "Error, no other function implemented yet."
return 1
# If everything runs without errors, return 0
return 0
# Define DWD FX Radar processing function
def process_dwd_radar(infile_fx):
"This function process DWD radar file."
### Read and process DWD FX data ###
## Documentation about DWD OpenData FX format is found at:
## https://www.dwd.de/DE/leistungen/radarprodukte/formatbeschreibung_fxdaten.pdf
### Read header information and binary data
with open(infile_fx, 'r') as f:
f.seek(0, 0) # Set reading cursor to start of file
file_data = f.read() # Read all lines and store to object
h_line = file_data.split('\x03', 1)[0] # Read header information till first ETX
fx_bin_data = file_data.split('\x03', 1)[1] # Binary data is stored after header ETX
# Process header information and save variables
prod_type = h_line[0:2] # Product type (FX)
meas_time = h_line[2:8] # Measurement time UTC (DDhhmm)
meas_date = h_line[13:17] # Measurement date (MMYY)
byte_len = np.int(h_line[19:26]) # Product length in bytes
intv_min = np.int(h_line[51:55]) # Intervall length in minutes
dim_x = np.int(h_line[57:61]) # Number of Pixels (x-direction)
dim_y = np.int(h_line[62:66]) # Number of Pixels (y-direction)
# Convert header time information to python time object
datetime_meas = datetime.strptime(meas_date+'-'+meas_time, '%m%y-%d%H%M')
# Read binary data from buffer variable and clean buffer
fx_data = np.frombuffer(fx_bin_data, dtype='<i2').copy()
del file_data
# Set missing values according to file format specs.
fx_data[fx_data > 4095] = 0
# Data handling for optimized postprocessing
fx_data = np.asfarray(fx_data) # Convert data to float
fx_data = fx_data.reshape(dim_x, dim_y) # Reshape vector to array
# Convert pixel to dBz values
fx_data = fx_data/10.0
fx_data = (fx_data/2.0) - 32.5
# Set all values below to NaN, non physical values
fx_data[fx_data < 0.0] = np.nan
# If everything runs without errors, return dictonary with variables
return {'dim_x': dim_x, 'dim_y': dim_y, 'datetime_meas': datetime_meas, 'fx_data': fx_data}
# Define DWD FX Radar radar grid function
def create_radar_grid(dim_x, dim_y):
"This function downloads latest DWD radar file."
### Geolocation of pixels
# Creating array for positions
grid_pos = np.empty((dim_x, dim_y,))
grid_pos[:] = np.nan
# Reference for Coordinates Calculation see:
# https://www.dwd.de/DE/leistungen/radolan/radolan_info/radolan_radvor_op_komposit_format_pdf.pdf?__blob=publicationFile&v=7
# page 13 and following
# Set constants
earthRadius = 6370.04 # km
junctionNorth = 60.0 # N
junctionEast = 10.0 # E
# Equidistant cartesian coordinates
dx_dist = 1.0 # km
dy_dist = 1.0 # km
# Coordinates of corners of cartesian grid
lowerleft_x = -523.4622
lowerleft_y = -4658.645
lowerright_x = 376.5378
lowerright_y = -4658.645
upperright_x = 376.5378
upperright_y = -3758.645
upperleft_x = -523.4622
upperleft_y = -3758.645
# Create x and y equidistant vectors
x_cord_cart = np.arange(lowerleft_x,lowerright_x,dx_dist)
y_cord_cart = np.arange(lowerleft_y,upperleft_y,dy_dist)
# Convert cartesian coordinates to lat lon coordinates
x_vec_cord = np.rad2deg( np.arctan(-x_cord_cart[0:900]/y_cord_cart[0]) + np.deg2rad( junctionEast ) ) # see p.13, formula 1.4a
y_vec_cord = np.rad2deg( np.arcsin(( \
((earthRadius**2) * ((1+np.sin(np.deg2rad(junctionNorth)))**2) - ( (x_cord_cart[0:900]**2) + (y_cord_cart[0:900]**2))) / \
((earthRadius**2) * ((1+np.sin(np.deg2rad(junctionNorth)))**2) + ( (x_cord_cart[0:900]**2) + (y_cord_cart[0:900]**2))) )) )
# Create meshgrid to have coordinates matrix
xm_ll, ym_ll = np.meshgrid(x_vec_cord, y_vec_cord)
# If everything runs without errors, return dictonary with grid variables
return {'xm_ll': xm_ll, 'ym_ll': ym_ll}
# Define DWD FX Radar plot function function
def plot_dwd_radar(xm_ll, ym_ll, fx_data, datetime_meas, plot_path, temp_folder):
"This function gets DWD radar and grid information and creates a plot."
# Import map data for plotting
### Plot DWD Open Radardata on a map
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
# Create formatted time string
time_string = datetime.strftime(datetime_meas, '%d/%m/%Y %H:%M UTC')
time_file = datetime.strftime(datetime_meas, '%d%m%Y_%H%M')
# Create new figure
fig = plt.figure(num=None, facecolor='w', edgecolor='k')
plt.contourf(xm_ll, ym_ll, fx_data, interpolation='none')
plt.title("Rain at: "+time_string, fontweight="bold", size=18)
map = Basemap(llcrnrlon=3.5,llcrnrlat=46.5,urcrnrlon=15.5,urcrnrlat=55.5,
resolution='i')
map.drawcoastlines()
map.drawcountries()
lat_ticks = [55.0,53.0,51.0,49.0,47.0]
map.drawparallels(lat_ticks, labels=[1,0,0,0], linewidth=0.0)
lon_ticks = [4.0,6.0,8.0,10.0,12.0,14.0]
map.drawmeridians(lon_ticks, labels=[0,0,0,1], linewidth=0.0)
# Insert colorbar
cbar = plt.colorbar()
cbar.set_label('Radar reflectivity (dBz)')
# Save plot to file
plt.savefig(plot_path + 'DWD_OpenData_FX_5min_precip_'+ time_file + '.png', dpi=100, format='png')
plt.close()
#plt.show()
# Clean temporary directory
import os
os.remove(temp_folder+"FXLATEST_000_MF002")
# If everything runs without errors, return 0
return 0
# Main control function
def main():
# Define parameters
print "Starting DWD OpenData Radar FX plot generation..."
# Download latest Radar file
radar_download = download_dwd_radar(1) # 1 = latest radar image
# Process Radar file
radar_process = process_dwd_radar(infile_fx)
# Generate DWD RADOLAN grid
#print(radar_process["dim_x"], radar_process["dim_y"]) # Debug option for grid size
radar_grid = create_radar_grid(radar_process["dim_x"], radar_process["dim_y"])
# Generate latest DWD FX Radar plot
radar_plot = plot_dwd_radar(radar_grid["xm_ll"], radar_grid["ym_ll"], radar_process["fx_data"], \
radar_process["datetime_meas"], plot_path, temp_folder)
# End of main
# Call main routine
main()
print "Finished processing DWD OpenData for radar."
|
StarcoderdataPython
|
4837181
|
<filename>wavespectra/plot.py
import numpy as np
import xarray as xr
from wavespectra.core.attributes import attrs
LOG_FACTOR = 1e3
RADII_FREQ_TICKS_LOG = np.array([0.05, 0.1, 0.2, 0.3, 0.4])
RADII_FREQ_TICKS_LIN = np.arange(0.1, 1.1, 0.1)
RADII_PER_TICKS_LOG = (np.array([20, 10, 5, 3, 2]))
RADII_PER_TICKS_LIN = np.arange(5, 30, 5)
CBAR_TICKS = [1e-2, 1e-1, 1e0]
LOG_CONTOUR_LEVELS = np.logspace(np.log10(0.005), np.log10(1), 14)
SUPPORTED_KIND = ["pcolormesh", "contourf", "contour"]
class WavePlot:
"""Plot spectra in polar axis.
Args:
- darr (DataArray): Wavespectra DataArray.
- kind (str): Plot kind, one of (`contourf`, `contour`, `pcolormesh`).
- normalised (bool): Show efth normalised between 0 and 1.
- logradius (bool): Set log radii.
- as_period (bool): Set radii as wave period instead of frequency.
- rmin (float): Minimum value to clip the radius axis.
- rmax (float): Maximum value to clip the radius axis.
- show_theta_labels (bool): Show direction tick labels.
- show_radii_labels (bool): Show radii tick labels.
- radii_ticks (array): Tick values for radii.
- radii_labels_angle (float): Polar angle at which radii labels are positioned.
- radii_labels_size (float): Fontsize for radii labels.
- cbar_ticks (array): Tick values for colorbar.
- cmap (str, obj): Colormap to use.
- efth_min (float): Clip energy density below this value.
- clean_axis (bool): Remove radii and theta axis lines for clean view.
- kwargs: All extra kwargs are passed to the plotting method defined by `kind`.
Returns:
- pobj: The xarray object returned by calling `da.plot.{kind}(**kwargs)`.
"""
def __init__(
self,
darr,
kind="contourf",
normalised=True,
logradius=True,
as_period=False,
rmin=None,
rmax=None,
show_theta_labels=True,
show_radii_labels=True,
radii_ticks=None,
radii_labels_angle=22.5,
radii_labels_size=8,
cbar_ticks=None,
cmap="RdBu_r",
extend="neither",
efth_min=1e-3,
clean_axis=False,
**kwargs,
):
self.kind = kind
self.normalised = normalised
self.logradius = logradius
self.as_period = as_period
self.show_theta_labels = show_theta_labels
self.show_radii_labels = show_radii_labels
self.radii_labels_angle = radii_labels_angle
self.radii_labels_size = radii_labels_size
self.cmap = cmap
self.extend = extend
self.efth_min = efth_min
self.clean_axis = clean_axis
# Attributes set based on other attributes
self._darr = darr
self._rmin = rmin
self._rmax = rmax
self._radii_ticks = radii_ticks
self._cbar_ticks = cbar_ticks
self._kwargs = kwargs
self._validate()
def __repr__(self):
s = f"<Waveplot {self.kind}>"
for attr in ["normalised", "logradius", "as_period"]:
if getattr(self, attr):
s = s.replace(">", f" {attr.split('_')[-1]}>")
return s
def __call__(self):
"""Execute plotting method."""
# kwargs for polar axis
default_subplot_kws = {
"projection": self._kwargs.pop("projection", "polar"),
"theta_direction": self._kwargs.pop("theta_direction", -1),
"theta_offset": self._kwargs.pop("theta_offset", np.deg2rad(90)),
}
subplot_kws = {**default_subplot_kws, **self._kwargs.pop("subplot_kws", {})}
# Call plotting function
pobj = getattr(self.darr.plot, self.kind)(
subplot_kws=subplot_kws, **self.kwargs
)
# Adjusting axes
if isinstance(pobj, xr.plot.facetgrid.FacetGrid):
axes = list(pobj.axes.ravel())
cbar = pobj.cbar
else:
axes = [pobj.axes]
cbar = pobj.colorbar
for ax in axes:
ax.set_rgrids(
radii=self.radii_ticks,
labels=self.radii_ticklabels,
angle=self.radii_labels_angle,
size=self.radii_labels_size,
)
ax.set_rmax(self.rmax)
ax.set_rmin(self.rmin)
# Disable labels as they are drawn on top of ticks
ax.set_xlabel("")
ax.set_ylabel("")
# Disable or not tick labels
if self.show_theta_labels is False:
ax.set_xticklabels("")
if self.show_radii_labels is False:
ax.set_yticklabels("")
# Clean axis
if self.clean_axis:
ax.set_rticks([])
ax.set_xticks([])
# Adjusting colorbar
if cbar is not None:
cbar.set_ticks(self.cbar_ticks)
return pobj
@property
def dname(self):
return attrs.DIRNAME
@property
def fname(self):
return attrs.FREQNAME
@property
def darr(self):
# Define polar coordinates
_darr = self._polar_dir(self._darr)
# Overwrite attributes for labels
_darr = self._set_labels(_darr)
# Normalise
if self.normalised:
_darr = (_darr - _darr.min()) / (_darr.max() - _darr.min())
# Set lowest value for masking
if self.efth_min is not None:
_darr = _darr.where(_darr >= self.efth_min, self.efth_min)
# Convert frequencis to periods
if self.as_period:
_darr = self._to_period(_darr)
# Set log10 radii
if self.logradius:
_darr = self._to_logradius(_darr)
return _darr
@property
def rmin(self):
"""The radius centre."""
if self._rmin is None:
return float(self.darr[self.fname].min())
if self.logradius and self._rmin > 0:
return np.log10(self._rmin * LOG_FACTOR)
else:
return self._rmin
@property
def rmax(self):
"""The radius edge."""
if self._rmax is None:
return float(self.darr[self.fname].max())
if self.logradius and self._rmax > 0:
return np.log10(self._rmax * LOG_FACTOR)
else:
return self._rmax
@property
def cbar_ticks(self):
"""Colorbar ticks."""
if self._cbar_ticks is None and self.normalised and "contour" in self.kind and "levels" not in self._kwargs:
self._cbar_ticks = CBAR_TICKS
return self._cbar_ticks
@property
def radii_ticks(self):
"""Tick locations for the radii axis."""
radii_ticks = self._radii_ticks
# Assign default values
if self._radii_ticks is None:
if self.logradius:
if self.as_period:
radii_ticks = RADII_PER_TICKS_LOG
else:
radii_ticks = RADII_FREQ_TICKS_LOG
else:
if self.as_period:
radii_ticks = RADII_PER_TICKS_LIN
else:
radii_ticks = RADII_FREQ_TICKS_LIN
# Ensure numpy array
radii_ticks = np.array(radii_ticks)
# Taking the log10 if logradius is True
if self.logradius:
radii_ticks = np.log10(radii_ticks * LOG_FACTOR)
# Raise ValueError if radii ticks are not within (rmin, rmax)
if self.rmin >= radii_ticks.max() or self.rmax <= radii_ticks.min():
if self.logradius:
ticks = 10 ** radii_ticks / LOG_FACTOR
rmin = 10 ** self.rmin / LOG_FACTOR
rmax = 10 ** self.rmax / LOG_FACTOR
else:
ticks = radii_ticks
rmin = self.rmin
rmax = self.rmax
raise ValueError(
f"radii_ticks '{ticks}' outside the bounds defined by 'rmin={rmin}', "
f"'rmax={rmax}', perhaps you are trying to define frequency radii "
"ticks on period radii or vice-versa?"
)
# Clipping to radii limits
radii_ticks = np.unique(radii_ticks.clip(self.rmin, self.rmax))
return radii_ticks
@property
def radii_ticklabels(self):
"""Tick labels for the radii axis."""
units = self.darr[self.fname].attrs.get("units", "Hz")
if self.logradius:
ticks = 10 ** self.radii_ticks / 1000
else:
ticks = self.radii_ticks
ticklabels = [f"{v:g}" for v in ticks]
ticklabels[-1] = ticklabels[-1] + units
return ticklabels
@property
def kwargs(self):
_kwargs = {**self._kwargs, **{"x": self.dname, "y": self.fname}}
if "colors" not in self._kwargs:
_kwargs.update({"cmap": self.cmap})
if "contour" in self.kind:
_kwargs.update({"extend": self.extend})
if self.normalised and "contour" in self.kind and "levels" not in _kwargs:
_kwargs.update({"levels": LOG_CONTOUR_LEVELS})
return _kwargs
def _validate(self):
"""Validate input arguments."""
if self.kind not in SUPPORTED_KIND:
raise NotImplementedError(
f"Wavespectra only supports the following kinds: {SUPPORTED_KIND}"
)
def _polar_dir(self, darray):
"""Sort, wrap and convert directions to radians for polar plot."""
# Sort directions
darray = darray.sortby(self.dname)
if self.kind not in ["pcolor", "pcolormesh"]:
# Close circle if not pcolor type (pcolor already closes it)
if darray[self.dname][0] % 360 != darray[self.dname][-1] % 360:
dd = np.diff(darray[self.dname]).mean()
closure = darray.isel(**{self.dname: 0})
closure = closure.assign_coords(
{self.dname: darray[self.dname][-1] + dd}
)
darray = xr.concat((darray, closure), dim=self.dname)
# Convert to radians
darray = darray.assign_coords({self.dname: np.deg2rad(darray[self.dname])})
return darray
def _set_labels(self, darr):
"""Redefine attributes to show in plot labels."""
darr[self.fname].attrs["standard_name"] = "Wave frequency ($Hz$)"
darr[self.dname].attrs["standard_name"] = "Wave direction ($degree$)"
if self.normalised:
darr.attrs["standard_name"] = "Normalised Energy Density"
darr.attrs["units"] = ""
else:
darr.attrs["standard_name"] = "Energy Density"
darr.attrs["units"] = "$m^{2}s/deg$"
return darr
def _to_period(self, darr):
darr = darr.assign_coords({self.fname: 1 / darr[self.fname]})
darr[self.fname].attrs.update(
{"standard_name": "sea_surface_wave_period", "units": "s"}
)
return darr
def _to_logradius(self, darr):
fattrs = darr[self.fname].attrs
dattrs = darr[self.dname].attrs
sattrs = darr.attrs
freqs = np.log10(darr.freq * LOG_FACTOR)
freqs = freqs.where(np.isfinite(freqs), 0)
darr = darr.assign_coords({self.fname: freqs})
darr.attrs = sattrs
darr[self.fname].attrs = fattrs
darr[self.dname].attrs = dattrs
return darr
def polar_plot(*args, **kargs):
"""Plot spectra in polar axis.
Args:
- darr (DataArray): Wavespectra DataArray.
- kind (str): Plot kind, one of (`contourf`, `contour`, `pcolormesh`).
- normalised (bool): Show efth normalised between 0 and 1.
- logradius (bool): Set log radii.
- as_period (bool): Set radii as wave period instead of frequency.
- rmin (float): Minimum value to clip the radius axis.
- rmax (float): Maximum value to clip the radius axis.
- show_theta_labels (bool): Show direction tick labels.
- show_radii_labels (bool): Show radii tick labels.
- radii_ticks (array): Tick values for radii.
- radii_labels_angle (float): Polar angle at which radii labels are positioned.
- radii_labels_size (float): Fontsize for radii labels.
- cbar_ticks (array): Tick values for colorbar.
- cmap (str, obj): Colormap to use.
- efth_min (float): Clip energy density below this value.
- kwargs: All extra kwargs are passed to the plotting method defined by `kind`.
Returns:
- pobj: The xarray object returned by calling `da.plot.{kind}(**kwargs)`.
"""
wp = WavePlot(*args, **kargs)
return wp()
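# Hypothetical usage sketch (not part of the original module): builds a synthetic
# frequency-direction spectrum and plots it with `polar_plot`. The coordinate names
# "freq" and "dir" are assumed to match attrs.FREQNAME / attrs.DIRNAME.
def _polar_plot_example():
    freqs = np.arange(0.03, 0.5, 0.01)
    dirs = np.arange(0.0, 360.0, 15.0)
    efth = xr.DataArray(
        np.random.rand(freqs.size, dirs.size),
        coords={"freq": freqs, "dir": dirs},
        dims=("freq", "dir"),
        name="efth",
    )
    return polar_plot(efth, kind="contourf")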
|
StarcoderdataPython
|
1679902
|
<reponame>PMARINA/Canvas-Autograder
"""Handles getting assignments from Canvas API.
Reference for Canvas: https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.index
"""
from typing import Dict
from typing import List
from requests import Response
import Canvas_Request
def process_assignments(
content: Response, assignments: List[Dict], section_id: str = "",
) -> None:
"""Add all assignments from response to list.
Args:
content (Response): response object from the api call
assignments (List[Dict]): Add all assignments found to this list
section_id (str): If you want to only add assignments from a single section. Defaults to "".
"""
def is_section_with_gradable(sec_info: Dict) -> bool:
"""Return if the section filter will select this section and if the section has any gradable assignments.
Args:
sec_info (Dict): The dict containing the section string and number of gradable submissions
Returns:
bool: If the assignment has your section with gradable assignments
"""
if sec_info["section_id"] == section_id:
if sec_info["needs_grading_count"] > 0:
return True
return False
content_dict: Dict = content.json()
for assignment in content_dict:
if not section_id:
if assignment["needs_grounding_count"] > 0:
assignments.append(assignment)
else:
for sec_info in assignment["needs_grading_count_by_section"]:
if is_section_with_gradable(sec_info):
assignments.append(assignment)
def get_all_assignments(course_id: str, section_id: str = "") -> List[Dict]:
"""Return all assignments in the course, and optionally, filters by section_id.
Args:
course_id (str): The course id for which you wish to acquire assignments
section_id (str): The section id you wish to filter by ("" for no filtering)
Raises:
Exception: If no gradable assignments are found...
Returns:
List[Dict]: A list of all assignments that match your query. See Canvas API for details of dict.
"""
assignments: List[Dict] = []
r: Response
if not section_id:
r = Canvas_Request.get_endpoint(f"courses/{course_id}/assignments")
else:
section_urlquery = "needs_grading_count_by_section=True"
url: str = f"courses/{course_id}/assignments?{section_urlquery}"
r = Canvas_Request.get_endpoint(url)
process_assignments(r, assignments, section_id)
while r.links["current"]["url"] != r.links["last"]["url"]:
r = Canvas_Request.get(r.links["next"]["url"])
process_assignments(r, assignments, section_id)
if len(assignments) == 0:
errmsg = (
"No gradable assignments found under your Canvas user."
" Are you a 'ta' or 'teacher'?"
)
raise Exception(errmsg)
return assignments
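# Hypothetical usage sketch (not part of the original module): the course id is a
# placeholder, and "name" / "needs_grading_count" follow the Canvas assignment schema.
if __name__ == "__main__":
    for gradable in get_all_assignments(course_id="12345", section_id=""):
        print(gradable.get("name"), gradable.get("needs_grading_count"))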
|
StarcoderdataPython
|
1644064
|
<reponame>hdoto/asterisk-mirror<filename>asterisk_mirror/main.py
# -*- coding: utf-8 -*-
import uuid
import signal
from threading import Thread, Event, Timer
from typing import List
from importlib import import_module
from asterisk_mirror.config import AsteriskConfig
from asterisk_mirror.stepper import Stepper
from asterisk_mirror.logics import MorseLogic, YearLogic, FlucLogic
# innner methods
def _merge_dict(source: dict, destination: dict):
for key, value in source.items():
if isinstance(value, dict):
node = destination.setdefault(key, {})
_merge_dict(value, node)
else:
destination[key] = value
return destination
# AsteriskMirror
class AsteriskMirror:
def __init__(self):
# configurations
config = AsteriskConfig()
self.stop_event = Event()
self.main_thread = None
self.timer_thread = None
self.stepper = Stepper([config.get('System.step_pin', int), config.get('System.direction_pin', int), config.get('System.enable_pin', int)])
self.transition = config.get('System.transition', int)
self.logics = []
self.logic_index = -1
# load and append logics
module = import_module('asterisk_mirror.logics')
for logic_str in config.get('System.logics').split(','):
logic_cls = getattr(module, logic_str.strip())
logic = logic_cls(self.stepper)
self.logics.append(logic)
print("AsteriskMirror [", "transition:", self.transition, "]")
def start(self):
if self.main_thread is not None:
print("AsteriskMirror: already started.")
return
print ("AsteriskMirror: starting...")
# renew threads
if self.timer_thread is not None:
self.stop_event.set()
self.timer_thread = Thread(target=self.timer_run)
self.stop_event.clear()
self.main_thread = Thread(target=self.run)
# start threads
self.main_thread.start()
self.timer_thread.start()
def stop(self):
print("AsteriskMirror: stopping...")
self.stop_event.set()
self.stepper.exit()
self.timer_thread = None
self.main_thread = None
def timer_run(self):
while not self.stop_event.is_set():
# set a new index of logics
self.logic_index = (self.logic_index+1)%len(self.logics)
print("AsteriskMirror: changes logic:", self.logics[self.logic_index])
# interrupt stepper thread and main thread
self.stepper.interrupt()
self.stop_event.wait(self.transition)
def run(self):
#print("AsteriskMirror.run starting...")
while not self.stop_event.is_set():
if self.logic_index >= 0 and len(self.logics) > 0:
logic = self.logics[self.logic_index]
logic.run()
else:
# wait until a right logic-index will be set
self.stop_event.wait(1)
# main
def main():
AsteriskConfig()
mirror = AsteriskMirror()
mirror.start()
# handles SIGINT(ctrl-c) and SIGTERM
def handler(signal, frame):
mirror.stop()
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1648131
|
<filename>lib/model/faster_rcnn/faster_rcnn.py
#encoding=utf-8
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.models as models
from torch.autograd import Variable
import numpy as np
from model.utils.config import cfg
from model.rpn.rpn import _RPN
from model.roi_layers import ROIAlign, ROIPool
# from model.roi_pooling.modules.roi_pool import _RoIPooling
# from model.roi_align.modules.roi_align import RoIAlignAvg
from model.rpn.proposal_target_layer_cascade import _ProposalTargetLayer
import time
import pdb
from model.utils.net_utils import _smooth_l1_loss, _crop_pool_layer, _affine_grid_gen, _affine_theta
class _fasterRCNN(nn.Module):
""" faster RCNN """
def __init__(self, classes, class_agnostic):
super(_fasterRCNN, self).__init__()
self.classes = classes
self.n_classes = len(classes)
self.class_agnostic = class_agnostic
# loss
self.RCNN_loss_cls = 0
self.RCNN_loss_bbox = 0
# define rpn
self.RCNN_rpn = _RPN(self.dout_base_model)
self.RCNN_proposal_target = _ProposalTargetLayer(self.n_classes)
# self.RCNN_roi_pool = _RoIPooling(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1.0/16.0)
# self.RCNN_roi_align = RoIAlignAvg(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1.0/16.0)
self.RCNN_roi_pool = ROIPool((cfg.POOLING_SIZE, cfg.POOLING_SIZE), 1.0/16.0)
self.RCNN_roi_align = ROIAlign((cfg.POOLING_SIZE, cfg.POOLING_SIZE), 1.0/16.0, 0)
self.lamda = 0.8
self.G_cls_feat = torch.zeros(self.n_classes, 4096).cuda()
self.not_first = False
self.label = torch.eye(self.n_classes, self.n_classes).cuda()
# for i in range(self.n_classes):
# self.label[i] = i
def forward(self, im_data, im_info, gt_boxes, num_boxes):
batch_size = im_data.size(0)
im_info = im_info.data
gt_boxes = gt_boxes.data
num_boxes = num_boxes.data
# feed image data to base model to obtain base feature map
base_feat = self.RCNN_base(im_data)
# base_feat = base_feat1.detach()
# if it is training phrase, then use ground trubut bboxes for refining
if self.training:
# feed base feature map tp RPN to obtain rois
self.RCNN_rpn.train()
rois, rpn_loss_cls, rpn_loss_bbox = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes)
self.RCNN_rpn.eval()
rois1, rpn_loss_cls1, rpn_loss_bbox1 = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes)
# pdb.set_trace()
roi_data = self.RCNN_proposal_target(rois, gt_boxes, num_boxes)
rois, rois_label, rois_target, rois_inside_ws, rois_outside_ws = roi_data
rois_label = Variable(rois_label.view(-1).long())
rois_target = Variable(rois_target.view(-1, rois_target.size(2)))
rois_inside_ws = Variable(rois_inside_ws.view(-1, rois_inside_ws.size(2)))
rois_outside_ws = Variable(rois_outside_ws.view(-1, rois_outside_ws.size(2)))
else:
rois, rpn_loss_cls, rpn_loss_bbox = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes)
rois1, rpn_loss_cls1, rpn_loss_bbox1 = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes)
rois_label = None
rois_target = None
rois_inside_ws = None
rois_outside_ws = None
rpn_loss_cls = 0
rpn_loss_bbox = 0
rois = Variable(rois)
rois1 = Variable(rois1)
# do roi pooling based on predicted rois
if cfg.POOLING_MODE == 'align':
pooled_feat1 = self.RCNN_roi_align(base_feat, rois1.view(-1, 5).detach())
pooled_feat = self.RCNN_roi_align(base_feat, rois.view(-1, 5).detach())
elif cfg.POOLING_MODE == 'pool':
pooled_feat1 = self.RCNN_roi_pool(base_feat, rois1.view(-1,5).detach())
pooled_feat = self.RCNN_roi_pool(base_feat, rois.view(-1, 5).detach())
# pdb.set_trace()
# feed pooled features to top model
pooled_feat = self._head_to_tail(pooled_feat)
pooled_feat1 = self._head_to_tail(pooled_feat1)
# compute bbox offset
bbox_pred = self.RCNN_bbox_pred(pooled_feat)
if self.training and not self.class_agnostic:
# pdb.set_trace()
# select the corresponding columns according to roi labels
bbox_pred_view = bbox_pred.view(bbox_pred.size(0), int(bbox_pred.size(1) / 4), 4)
bbox_pred_select = torch.gather(bbox_pred_view, 1, rois_label.view(rois_label.size(0), 1, 1).expand(rois_label.size(0), 1, 4))
bbox_pred = bbox_pred_select.squeeze(1)
# compute object classification probability
cls_score = self.RCNN_cls_score(pooled_feat)
cls_prob = F.softmax(cls_score, 1)
cls_score1 = self.RCNN_cls_score(pooled_feat1)
cls_prob1 = F.softmax(cls_score1, 1)
cls_loss_cls, cls_entropy_loss, sim_loss_cls = self.fun(cls_prob1.detach(), cls_score1, pooled_feat1)
RCNN_loss_cls = 0
RCNN_loss_bbox = 0
if self.training:
# classification loss
RCNN_loss_cls = F.cross_entropy(cls_score, rois_label)
# bounding box regression L1 loss
RCNN_loss_bbox = _smooth_l1_loss(bbox_pred, rois_target, rois_inside_ws, rois_outside_ws)
cls_prob = cls_prob.view(batch_size, rois.size(1), -1)
bbox_pred = bbox_pred.view(batch_size, rois.size(1), -1)
return rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_bbox, RCNN_loss_cls, RCNN_loss_bbox, rois_label, cls_loss_cls, cls_entropy_loss, sim_loss_cls#, pooled_feat
def _init_weights(self):
def normal_init(m, mean, stddev, truncated=False):
"""
weight initalizer: truncated normal and random normal.
"""
# x is a parameter
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation
else:
m.weight.data.normal_(mean, stddev)
m.bias.data.zero_()
normal_init(self.RCNN_rpn.RPN_Conv, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_rpn.RPN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_rpn.RPN_bbox_pred, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_bbox_pred, 0, 0.001, cfg.TRAIN.TRUNCATED)
def create_architecture(self):
self._init_modules()
self._init_weights()
def fun(self, cls_prob, cls_score, pooled_feat):
region_prob_sum = 1/(torch.t(cls_prob).sum(1))
region_prob_weight = region_prob_sum.unsqueeze(1).expand(cls_prob.size(1), cls_prob.size(0)).mul(torch.t(cls_prob))
cls_feat = torch.mm(region_prob_weight, pooled_feat)
if self.not_first:
alpha = ((cls_feat.mul(self.G_cls_feat).sum(1)/(torch.norm(cls_feat, 2, 1).mul(torch.norm(self.G_cls_feat, 2, 1)))).unsqueeze(1)+1)/2
else:
alpha = torch.ones(self.n_classes,1).cuda()
self.not_first = True
self.G_cls_feat = cls_feat.mul(alpha.detach())+(self.G_cls_feat.detach()).mul(1-alpha.detach())
# pdb.set_trace()
sim_cls_score = torch.mm(pooled_feat, torch.t(self.G_cls_feat.detach()))/torch.mm(torch.norm(pooled_feat, 2, 1).unsqueeze(1), torch.norm(self.G_cls_feat.detach(), 2, 1).unsqueeze(0))
sim_label = (10 * sim_cls_score + cls_prob).max(1)[1]
sim_loss_cls = F.cross_entropy(cls_score, sim_label.detach())
cls_cls_score = self.RCNN_cls_score(self.G_cls_feat)
cls_cls_prob = (F.softmax(cls_cls_score, 1)>(1.0/self.n_classes)).float()
cls_loss_cls = -F.log_softmax(cls_cls_score, 1).mul(self.label).mul(cls_cls_prob.detach()).sum(0).sum(0) / cls_cls_score.size(0)#goodcls.sum(0)
# pdb.set_trace()
cls_entropy_loss = self.entropy_loss(cls_score)
return cls_loss_cls, cls_entropy_loss, sim_loss_cls
def entropy_loss(self, score):
#pdb.set_trace()
num = -F.log_softmax(score, 1).mul(F.softmax(score, 1)).sum(0).sum(0)/score.size(0)
return num
#reason_function
def reason_run(self, pooled_feat, cls_score, cls_prob, rois):
reason_feat, mask = self.reason(pooled_feat.detach(), cls_score.detach(), rois.detach())
reason_score = self.RCNN_cls_score(reason_feat)
reason_prob = F.softmax(reason_score, 1)
right_region = mask.byte().squeeze(1).mul(reason_prob.max(1)[1] == cls_prob.max(1)[1])
cls_score = cls_score + right_region.float().unsqueeze(1).expand(reason_score.size(0), reason_score.size(1)).mul(reason_score)
right_inds = torch.nonzero(right_region > 0).view(-1).detach()
right_score = cls_score[right_inds]
fake_label = cls_prob.max(1)[1][right_inds]
def reason(self, pooled_feat, cls_score, rois):
        # Extract the batch size, number of region proposals, and number of classes: B, R, C
# train:4,256,101; test:1,300,101
batch_size = rois.size(0)
region_num = rois.size(1)
fea_num = pooled_feat.size(1)
# pdb.set_trace()
mask = self.back_goodinds(cls_score, batch_size*region_num)
mask_cls_score = cls_score.mul(mask.expand(batch_size*region_num, cls_score.size(1)))
raw_region_box = torch.cuda.FloatTensor(rois.data[:, :, 1:5])
trs_region_box = self.__transform_box(raw_region_box) # raw_region_box)
        # Split out B and R and reshape to B*R*C
temp_feat = pooled_feat.view(batch_size, region_num, -1)
temp_score = mask_cls_score.view(batch_size, region_num, -1)
temp_mask = mask.view(batch_size, region_num, -1)
# pdb.set_trace()
        # Create an all-zero B*R*C tensor to store the reasoning result for each image
temp0 = torch.zeros(batch_size, region_num, fea_num).cuda()
for i in range(batch_size):
            # Use NMS to filter duplicate boxes and obtain an R*1 mask
# pdb.set_trace()
region_fea = temp_feat[i, :, :]
# region_score = temp_score[i, :, :]
region_mask = temp_mask[i, :, :]
A_spread = self.__Build_spread(trs_region_box[i, :, :], region_num, region_mask)
spread_reason_fea = torch.mm(A_spread, region_fea)
# norm = 1.0 / (spread_reason_fea.sum(1) + 0.0001).unsqueeze(1).expand(region_num, fea_num) # 50
# spread_reason_fea = spread_reason_fea.mul(norm)
choose_fea = spread_reason_fea # 0.07
# pdb.set_trace()
            # Save the reasoning result of each image into temp0
temp0[i, :, :] = temp0[i, :, :] + choose_fea
reason_feat = temp0.view(batch_size*region_num, -1)
return reason_feat, mask
def __Build_spread(self, region_box, region_num, mask):
        # Expand the coordinates to 3-D and transpose them to compute pairwise region distances
expand1_region_box = region_box.unsqueeze(2)
expand_region_box = expand1_region_box.expand(region_num, 4, region_num)
transpose_region_box = expand_region_box.transpose(0, 2)
        # Compute each region's spread range from its w and h
spread_distance = torch.sqrt(torch.pow(region_box[:, 2], 2) + torch.pow(region_box[:, 3], 2))
expand_spread_distance = (self.lamda * spread_distance).expand(region_num, region_num)
        # Compute the pairwise distances between regions from their x and y centres
region_distance = torch.sqrt(
torch.pow((expand_region_box[:, 0, :] - transpose_region_box[:, 0, :]), 2) + torch.pow(
(expand_region_box[:, 1, :] - transpose_region_box[:, 1, :]), 2))
# A = F.relu(expand_spread_distance-region_distance)
        # Compute the propagation-matrix weights from the propagation ranges and distances
A = F.relu(1 - region_distance / expand_spread_distance)
# pdb.set_trace()
#mask_w = torch.t(mask).expand(region_num, region_num)
mask_w = torch.mm(mask, torch.t(mask))
        # A region does not receive reasoning information from itself
self_w = 1 - torch.eye(region_num, region_num).cuda()
A = A.mul(self_w).mul(mask_w)
weight = 1.0/(A.sum(1)+0.001).unsqueeze(1).expand(region_num, region_num)
return A.mul(weight)
def __transform_box(self, region_box):
new_region_box = torch.zeros((region_box.size(0), region_box.size(1), 4)).cuda()
new_region_box[:, :, 0] = 0.5 * (region_box[:, :, 0] + region_box[:, :, 2])
new_region_box[:, :, 1] = 0.5 * (region_box[:, :, 1] + region_box[:, :, 3])
new_region_box[:, :, 2] = region_box[:, :, 2] - region_box[:, :, 0]
new_region_box[:, :, 3] = region_box[:, :, 3] - region_box[:, :, 1]
return new_region_box
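    # Returns an (N, 1) float mask that is 1 for regions whose highest-scoring class is
    # not the background class (index 0).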
def back_goodinds(self, region_fea, region_num):
mask = (region_fea.max(1)[1]>0).float().unsqueeze(1)
return mask
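# Illustrative sketch (not part of the original file): how the propagation matrix built
# in __Build_spread weights neighbouring regions. Uses three dummy centre-format boxes
# (cx, cy, w, h), lamda = 1.0, and no foreground mask, and runs on CPU for clarity,
# whereas the module above assumes CUDA tensors.
if __name__ == "__main__":
    import torch
    import torch.nn.functional as F
    boxes = torch.tensor([[10., 10., 4., 4.],
                          [12., 10., 4., 4.],
                          [50., 50., 4., 4.]])
    n = boxes.size(0)
    spread = torch.sqrt(boxes[:, 2] ** 2 + boxes[:, 3] ** 2).expand(n, n)  # per-column spread radius
    dist = torch.cdist(boxes[:, :2], boxes[:, :2])                         # pairwise centre distances
    A = F.relu(1 - dist / spread)            # linear falloff inside the spread radius
    A = A * (1 - torch.eye(n))               # a region does not propagate to itself
    A = A / (A.sum(1, keepdim=True) + 1e-3)  # row-normalise, as in __Build_spread
    print(A)  # boxes 0 and 1 exchange information; box 2 is too far away and stays isolated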
|
StarcoderdataPython
|
138470
|
"""
sentry.cache.django
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.core.cache import cache
from .base import BaseCache
class DjangoCache(BaseCache):
def set(self, key, value, timeout, version=None):
cache.set(key, value, timeout, version=version or self.version)
def delete(self, key, version=None):
cache.delete(key, version=version or self.version)
def get(self, key, version=None):
return cache.get(key, version=version or self.version)
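# Illustrative usage sketch (not part of the original module). Assumes a configured
# Django settings module with a default cache backend, and that BaseCache accepts a
# ``version`` keyword argument -- the constructor signature here is an assumption.
if __name__ == "__main__":
    backend = DjangoCache(version=2)
    backend.set("task:status", "done", timeout=300)
    assert backend.get("task:status") == "done"
    backend.delete("task:status")
    assert backend.get("task:status") is None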
|
StarcoderdataPython
|
1787381
|
<filename>Scripts/simulation/global_policies/global_policy_tuning.py<gh_stars>0
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\global_policies\global_policy_tuning.py
# Compiled at: 2019-04-27 02:11:59
# Size of source mod 2**32: 8678 bytes
from date_and_time import TimeSpan
from display_snippet_tuning import DisplaySnippet
from elements import SleepElement
from event_testing.resolver import SingleSimResolver
from event_testing.test_events import TestEvent
from global_policies.global_policy_effects import GlobalPolicyEffectVariants
from global_policies.global_policy_enums import GlobalPolicyProgressEnum, GlobalPolicyTokenType
from interactions.utils.loot import LootActions
from sims4.localization import TunableLocalizedStringFactory, LocalizationHelperTuning
from sims4.tuning.tunable import TunableList, TunableRange
import services, sims4
logger = sims4.log.Logger('Global Policy Tuning', default_owner='shipark')
class GlobalPolicy(DisplaySnippet):
GLOBAL_POLICY_TOKEN_NON_ACTIVE = TunableLocalizedStringFactory(description='\n Display string that appears when trying to use a Global Policy Token\n referencing a non-active Global Policy.\n ')
INSTANCE_TUNABLES = {'decay_days':TunableRange(description='\n The number of days it will take for the global policy to revert to\n not-complete. Decay begins when the policy is completed.\n ',
tunable_type=int,
default=5,
minimum=0),
'progress_initial_value':TunableRange(description='\n The initial value of global policy progress. Progress begins when\n the policy is first set to in-progress.\n ',
tunable_type=int,
default=0,
minimum=0),
'progress_max_value':TunableRange(description='\n The max value of global policy progress. Once the policy progress\n reaches the max threshold, global policy state becomes complete.\n ',
tunable_type=int,
default=100,
minimum=1),
'loot_on_decay':TunableList(description='\n A list of loot actions that will be run when the policy decays.\n ',
tunable=LootActions.TunableReference(description='\n The loot action will target the active Sim.\n ')),
'loot_on_complete':TunableList(description='\n A list of loot actions that will be run when the policy is complete.\n ',
tunable=LootActions.TunableReference(description='\n The loot action will target the active Sim.\n ')),
'global_policy_effects':TunableList(description='\n Actions to apply when the global policy is enacted.\n ',
tunable=GlobalPolicyEffectVariants(description='\n The action to apply.\n '))}
@classmethod
def _verify_tuning_callback(cls):
if cls.progress_max_value < cls.progress_initial_value:
logger.error('Global Policy {} has a max value less than the initial value. This is not allowed.', cls)
def __init__(self, progress_initial_value=None, **kwargs):
        super().__init__(**kwargs)
self._progress_state = GlobalPolicyProgressEnum.NOT_STARTED
self._progress_value = 0
self.decay_handler = None
self.end_time_from_load = 0
@property
def progress_state(self):
return self._progress_state
@property
def progress_value(self):
return self._progress_value
def pre_load(self, global_policy_data):
self.set_progress_state((GlobalPolicyProgressEnum(global_policy_data.progress_state)), from_load=True)
self.set_progress_value((global_policy_data.progress_value), from_load=True)
if global_policy_data.decay_days != 0:
self.end_time_from_load = global_policy_data.decay_days
def set_progress_state(self, progress_enum, from_load=False):
old_state = self._progress_state
self._progress_state = progress_enum
if old_state != self._progress_state:
if not from_load:
services.get_event_manager().process_event((TestEvent.GlobalPolicyProgress), custom_keys=(type(self), self))
def set_progress_value(self, new_value, from_load=False):
self._progress_value = new_value
if not from_load:
self._process_new_value(new_value)
return self.progress_state
    def _process_new_value(self, new_value):
        # Reconstructed from the bytecode disassembly that uncompyle6 emitted for this
        # method (source lines 154-164) after its decompilation failed.
        if new_value <= self.progress_initial_value and self.progress_state != GlobalPolicyProgressEnum.NOT_STARTED:
            self.set_progress_state(GlobalPolicyProgressEnum.NOT_STARTED)
            self.decay_handler = None
            for effect in self.global_policy_effects:
                effect.turn_off(self.guid64)
        elif new_value >= self.progress_max_value and self.progress_state != GlobalPolicyProgressEnum.COMPLETE:
            self.set_progress_state(GlobalPolicyProgressEnum.COMPLETE)
            for effect in self.global_policy_effects:
                effect.turn_on(self.guid64)
        elif self.progress_state != GlobalPolicyProgressEnum.IN_PROGRESS:
            self.set_progress_state(GlobalPolicyProgressEnum.IN_PROGRESS)
def apply_policy_loot_to_active_sim(self, loot_list, resolver=None):
if resolver is None:
resolver = SingleSimResolver(services.active_sim_info())
for loot_action in loot_list:
loot_action.apply_to_resolver(resolver)
def decay_policy(self, timeline):
yield timeline.run_child(SleepElement(TimeSpan.ZERO))
services.global_policy_service().set_global_policy_progress(self, self.progress_initial_value)
self.decay_handler = None
self.apply_policy_loot_to_active_sim(self.loot_on_decay)
@classmethod
def get_non_active_display(cls, token_data):
if token_data.token_property == GlobalPolicyTokenType.NAME:
return LocalizationHelperTuning.get_raw_text(token_data.global_policy.display_name())
if token_data.token_property == GlobalPolicyTokenType.PROGRESS:
return LocalizationHelperTuning.get_raw_text(cls.GLOBAL_POLICY_TOKEN_NON_ACTIVE())
logger.error('Invalid Global Policy Property {} tuned on the Global Policy token.'.format(token_data.property))
def get_active_policy_display(self, token_data):
if token_data.token_property == GlobalPolicyTokenType.NAME:
return LocalizationHelperTuning.get_raw_text(self.display_name())
if token_data.token_property == GlobalPolicyTokenType.PROGRESS:
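            # e.g. progress_value 33 of 100 -> round(0.33, 2) * 100 -> "33"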
progress_percentage_str = str(int(round(float(self.progress_value) / float(self.progress_max_value), 2) * 100))
return LocalizationHelperTuning.get_raw_text(progress_percentage_str)
logger.error('Invalid Global Policy Property {} tuned on the Global Policy token.'.format(token_data.property))
|
StarcoderdataPython
|
3250777
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
import symjax.tensor as T
import matplotlib.pyplot as plt
from symjax.viz import compute_graph
x = T.random.randn((10,), name="x")
y = T.random.randn((10,), name="y")
z = T.random.randn((10,), name="z")
w = T.Variable(T.ones(1), name="w")
out = (x + y).sum() * w + z.sum()
graph = compute_graph(out)
graph.draw("file.png", prog="dot")
import matplotlib.image as mpimg
img = mpimg.imread("file.png")
plt.figure(figsize=(15, 5))
imgplot = plt.imshow(img)
plt.xticks()
plt.yticks()
plt.tight_layout()
|
StarcoderdataPython
|
4838340
|
<reponame>abael/ScrapyFrontera
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from tests.backends import BackendSequenceTest, TEST_SITES
from frontera.utils.tester import DownloaderSimulator, BaseDownloaderSimulator
from six.moves.urllib.parse import urlparse
class DFSOverusedBackendTest(BackendSequenceTest):
EXPECTED_SEQUENCES = {
"SEQUENCE_01_A": [
'https://www.a.com', 'http://b.com', 'http://www.a.com/2', 'http://www.a.com/2/1', 'http://www.a.com/3',
'http://www.a.com/2/1/3', 'http://www.a.com/2/4/1', 'http://www.a.net', 'http://b.com/2',
'http://test.cloud.c.com', 'http://cloud.c.com', 'http://test.cloud.c.com/2',
'http://b.com/entries?page=2', 'http://www.a.com/2/4/2'
],
"SEQUENCE_02_A": [
'https://www.a.com', 'http://b.com', 'http://www.a.com/2', 'http://www.a.com/2/1', 'http://www.a.com/3',
'http://www.a.com/2/1/3', 'http://www.a.com/2/4/1', 'http://www.a.com/2/4/2', 'http://www.a.net',
'http://b.com/2', 'http://test.cloud.c.com', 'http://cloud.c.com', 'http://test.cloud.c.com/2',
'http://b.com/entries?page=2'
]
}
def test_sequence1(self):
sequence = self.get_sequence(TEST_SITES['SITE_09'], max_next_requests=5,
downloader_simulator=DownloaderSimulator(rate=1))
assert len(sequence) == 7
all_domains = set()
for requests, iteration, dl_info in sequence:
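            # The backend must not hand out requests for hosts the downloader has reported as overused.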
overused_keys = set(dl_info['overused_keys'])
for r in requests:
url = urlparse(r.url)
all_domains.add(url.hostname)
if not overused_keys:
continue
assert url.hostname not in overused_keys
assert overused_keys.issubset(all_domains)
|
StarcoderdataPython
|
199296
|
<reponame>alentoghostflame/StupidAlentoBot<filename>mmo_module/mmo.py
from mmo_module.mmo_data import GuildMMOConfig, UserMMOConfig, BasicMMODataStorage, CharacterSaveData
from alento_bot import BaseModule, StorageManager
from mmo_module import mmo_admin, mmo_user, text
from mmo_module.mmo_controller import MMOServer, MMOBattleManager
from discord.ext import commands
from datetime import datetime
from typing import Dict
class MMOModule(BaseModule):
def __init__(self, *args):
BaseModule.__init__(self, *args)
# noinspection PyArgumentList
self.storage.caches.register_cache("basic_mmo_data", BasicMMODataStorage(self.storage.config))
self.storage.users.register_data_name("mmo_char_save", CharacterSaveData)
self.storage.guilds.register_data_name("mmo_config", GuildMMOConfig)
self.storage.users.register_data_name("mmo_config", UserMMOConfig)
self.mmo_server: MMOServer = MMOServer(self.storage)
self.add_cog(MMOAdmin(self.storage, self.mmo_server))
self.add_cog(MMOUser(self.bot, self.storage, self.mmo_server))
class MMOUser(commands.Cog, name="MMO User"):
def __init__(self, bot: commands.Bot, storage: StorageManager, mmo_server: MMOServer):
self.bot: commands.Bot = bot
self.storage: StorageManager = storage
self.mmo_server: MMOServer = mmo_server
self.mmo_battle: MMOBattleManager = MMOBattleManager(self.mmo_server)
self._change_class_cooldowns: Dict[int, datetime] = dict()
@commands.group(name="mmo", brief=text.MMO_BRIEF, invoke_without_command=True)
async def mmo_user(self, context: commands.Context):
if context.message.content.strip().lower() == f"{context.prefix}{context.command.name}":
await context.send_help(context.command)
else:
await context.send(text.INVALID_COMMAND)
@mmo_user.command(name="enable", brief=text.MMO_ENABLE_BRIEF)
async def mmo_enable(self, context: commands.Context):
await mmo_user.enable(self.mmo_server, context)
@mmo_user.command(name="disable", brief=text.MMO_DISABLE_BRIEF)
async def mmo_disable(self, context: commands.Context):
await mmo_user.disable(self.mmo_server, context)
@mmo_user.command(name="status", brief=text.MMO_STATUS_BRIEF)
async def mmo_status(self, context: commands.Context):
await mmo_user.status(self.mmo_server, context)
@mmo_user.command(name="battle", brief=text.MMO_BATTLE_BRIEF)
async def mmo_battle(self, context: commands.Context):
if self.mmo_server.user.enabled(context.author.id):
await self.mmo_battle.create_battle(context)
else:
await context.send(mmo_user.text.MMO_CURRENTLY_DISABLED)
@mmo_user.command("attack", brief=text.MMO_ATTACK_BRIEF)
async def mmo_attack(self, context: commands.Context, attack_name="Default Attack"):
await self.mmo_battle.attack(context, attack_name)
@mmo_user.group("char", brief=text.MMO_CHAR_BRIEF, invoke_without_command=True)
async def mmo_char(self, context: commands.Context):
if context.message.content.strip().lower() == f"{context.prefix}mmo char":
await context.send_help(context.command)
else:
await context.send(text.INVALID_COMMAND)
@mmo_char.group("class", brief=text.MMO_CHAR_CLASS_BRIEF, invoke_without_command=True)
async def mmo_char_class(self, context: commands.Context, class_name: str = None):
if context.message.content.strip().lower() == f"{context.prefix}mmo char class":
await context.send_help(context.command)
else:
await mmo_user.set_class(self.mmo_server, self._change_class_cooldowns, context, class_name)
@mmo_char_class.command("list", brief=text.MMO_CHAR_CLASS_LIST_BRIEF)
async def mmo_char_class_list(self, context: commands.Context):
await mmo_user.send_class_display(self.mmo_server, context)
@mmo_char.command("name", brief=text.MMO_CHAR_NAME_BRIEF)
async def mmo_char_name(self, context: commands.Context, new_name: str):
await mmo_user.set_character_name(self.mmo_server, context, new_name)
@mmo_char.command("attack", brief=text.MMO_CHAR_ATTACK_BRIEF)
async def mmo_char_attack(self, context: commands.Context, new_attack: str):
await mmo_user.set_default_attack(self.mmo_server, context, new_attack)
@mmo_user.command("spells", brief=text.MMO_SPELLS_BRIEF)
async def mmo_spells(self, context: commands.Context):
await mmo_user.send_ability_display(self.mmo_server, context)
class MMOAdmin(commands.Cog, name="MMO Admin"):
def __init__(self, storage: StorageManager, mmo_server: MMOServer):
self.storage = storage
self.mmo_server: MMOServer = mmo_server
@commands.group(name="mmoa", brief=text.MMOA_BRIEF, invoke_without_command=True)
async def mmoa(self, context: commands.Context):
if context.message.content.strip() == f"{context.prefix}{context.command.name}":
await context.send_help(context.command)
else:
await context.send(text.INVALID_COMMAND)
@mmoa.command(name="enable", brief=text.MMOA_ENABLE_BRIEF)
async def mmoa_enable(self, context: commands.Context):
await mmo_admin.enable(self.mmo_server, context)
@mmoa.command(name="disable", brief=text.MMOA_DISABLE_BRIEF)
async def mmoa_disable(self, context: commands.Context):
await mmo_admin.disable(self.mmo_server, context)
|
StarcoderdataPython
|