id (stringlengths 1–7) | text (stringlengths 6–1.03M) | dataset_id (stringclasses, 1 value)
---|---|---|
3202713
|
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__date__ = '9/3/2020 8:12 AM'
def get_sub(param1, param2):
return abs(param1 - param2)
def get_val(arr, i, j):
n = len(arr)
if 0 <= i < n and 0 <= j < n:
return arr[i][j]
else:
return float('inf')
def get_min(arr, i, j):
return min(abs(arr[i][j] - get_val(arr, i - 1, j)),
abs(arr[i][j] - get_val(arr, i, j + 1)),
abs(arr[i][j] - get_val(arr, i + 1, j)),
abs(arr[i][j] - get_val(arr, i, j - 1)))
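# Illustrative example (not part of the original solution): with arr = [[1, 3], [6, 10]],
# get_val(arr, -1, 0) returns inf (out of bounds), so
# get_min(arr, 0, 0) = min(|1 - inf|, |1 - 3|, |1 - 6|, |1 - inf|) = 2.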
def helper(arr):
n = len(arr)
dp = [[float('inf')] * (n + 1) for _ in range(n + 1)]
dp[1][1] = 0
# for i in range(1, n + 1):
# for j in range(1, n + 1):
# if i == 1 and j == 1:
# continue
# else:
# dp[i][j] = min(get_sub(arr[i - 1][j - 1], get_val(arr, i - 2, j - 1)),
# get_sub(arr[i - 1][j - 1], get_val(arr, i - 1, j - 2)),
# get_sub(arr[i - 1][j - 1], get_val(arr, i, j - 1)),
# get_sub(arr[i - 1][j - 1], get_val(arr, i - 1, j)))
temp = [[0] * n for _ in range(n)]
for i in range(n):
for j in range(n):
temp[i][j] = get_min(arr, i, j)
print(temp)
return dp[-1][-1]
if __name__ == '__main__':
n = int(input().split()[0])
data = []
for _ in range(n):
temp = list(map(int, input().split()))
data.append(temp)
res = helper(data)
# print(res)
|
StarcoderdataPython
|
185811
|
<filename>vio/vio/pub/msapi/extsys.py
# Copyright (c) 2017-2018 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import logging
from vio.pub.utils.restcall import AAIClient
logger = logging.getLogger(__name__)
def split_vim_to_owner_region(vim_id):
split_vim = vim_id.split('_')
cloud_owner = split_vim[0]
cloud_region = "".join(split_vim[1:])
return cloud_owner, cloud_region
def get_vim_by_id(vim_id):
cloud_owner, cloud_region = split_vim_to_owner_region(vim_id)
client = AAIClient(cloud_owner, cloud_region)
ret = client.get_vim(get_all=True)
ret['type'] = ret['cloud-type']
ret['version'] = ret['cloud-region-version']
ret['vimId'] = vim_id
ret['cloud_owner'] = cloud_owner
ret['cloud_region_id'] = cloud_region
ret['name'] = vim_id
ret['userName'] = ret['esr-system-info-list'][
'esr-system-info'][0]['user-name']
ret['password'] = ret['esr-system-info-list'][
'esr-system-info'][0]['password']
ret['tenant'] = ret['esr-system-info-list'][
'esr-system-info'][0]['default-tenant']
ret['url'] = ret['esr-system-info-list'][
'esr-system-info'][0]['service-url']
ret['domain'] = ret['esr-system-info-list'][
'esr-system-info'][0]['cloud-domain']
ret['cacert'] = ret['esr-system-info-list'][
'esr-system-info'][0].get('ssl-cacert', "")
ret['insecure'] = ret['esr-system-info-list'][
'esr-system-info'][0].get('ssl-insecure', False)
return ret
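# Illustrative note (not part of the original module): split_vim_to_owner_region
# assumes a vim_id of the form "<cloud_owner>_<region...>"; everything after the
# first underscore is concatenated (without separators) into the region id.
# For example, with a hypothetical id:
#   split_vim_to_owner_region("VMware_LAB_RegionOne") -> ("VMware", "LABRegionOne")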
|
StarcoderdataPython
|
52857
|
<gh_stars>0
#!/usr/bin/env python3
import unittest.mock
from unittest.mock import call
import draw
class TestDraw(unittest.TestCase):
def test_parse_command_1(self):
self.assertEqual([("F", 100), ("T", 90), ("F", 100), ("T", 90), ("F", 100), ("T", 90), ("F", 100)],
draw.parse_command("F100;T90;F100;T90;F100;T90;F100"))
self.assertEqual([("R", 4), ("F", 100), ("T", 90), ("E", 0)],
draw.parse_command("R4;F100;T90;E0"))
self.assertEqual([("R", 5), ("F", 80), ("T", -72), ("E", 0)],
draw.parse_command("R5;F80;T-72;E0"))
def test_parse_command_2(self):
self.assertEqual([("F", 100), ("T", 90), ("F", 100), ("T", 90), ("F", 100), ("T", 90), ("F", 100)],
draw.parse_command("F100T90F100T90F100T90F100"))
self.assertEqual([("R", 4), ("F", 100), ("T", 90), ("E", 0)],
draw.parse_command("R4F100T90E0"))
self.assertEqual([("R", 5), ("F", 80), ("T", -72), ("E", 0)],
draw.parse_command("R5F80T-72E0"))
@unittest.mock.patch("draw.turtle")
def test_run_commands(self, mock_turtle: unittest.mock.Mock):
draw.run_commands([("R", 5), ("F", 80), ("T", -72), ("E", 0)])
self.assertEqual(mock_turtle.method_calls,
[call.forward(80),
call.left(-72),
call.forward(80),
call.left(-72),
call.forward(80),
call.left(-72),
call.forward(80),
call.left(-72),
call.forward(80),
call.left(-72)])
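# The draw module under test is not included here. As an illustrative sketch only
# (an assumption, not the project's actual implementation), a parse_command that
# satisfies both test cases above could treat each command as a single uppercase
# letter followed by a signed integer, with ";" separators being optional:
import re

def example_parse_command(command):
    """Return a list of (letter, value) tuples parsed from a command string."""
    return [(letter, int(value))
            for letter, value in re.findall(r"([A-Z])(-?\d+)", command)]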
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
1797807
|
#!/usr/bin/env python3
#
import logging
# from scipy.special import comb
import gmpy2
logging.basicConfig(format="%(message)s", level=logging.INFO)
logger = logging.getLogger()
MODULUS = pow(2, 15)
# MODULUS = 0
class IterationError(Exception):
pass
def r0_3_r1_n(n, r7):
result = r7 * (r7 + n + 3) % MODULUS
coef = gmpy2.mpz(n + 3)
for i in range(2, n + 2):
coef = gmpy2.divexact(coef * (n + 4 - i), i)
result = (r7 * (result + coef)) % MODULUS
result = (result + n) % MODULUS
return result
def confirm(r0, r1, r7, maxstep=0, useshortcut=0, usecache=False):
stack = []
step = 0
reg0, reg1, reg7 = r0, r1, r7
cache = dict()
seen_at = dict()
def sub178b(lvl=0):
nonlocal reg0, reg1, reg7, step, stack
step += 1
lvl += 1
if maxstep and step > maxstep:
raise IterationError("Too many steps!")
if reg0 == 0:
block = 0
elif reg1 == 0:
block = 1
else:
block = 2
logger.debug("{:3d}: [{}] r0:{:2d} r1:{:2d} s: {}".format(
step, block, reg0, reg1, " ".join([str(v) for v in stack])))
inputs = (reg0, reg1, reg7)
if usecache:
if not inputs in seen_at:
seen_at[inputs] = len(stack)
if inputs in cache:
reg0, reg1, cached_steps = cache[inputs]
# step += cached_steps
return
# shortcuts
if reg0 <= useshortcut:
if reg0 == 1:
reg1 = reg7 + reg1
elif reg0 == 2:
reg1 = reg7 * (reg1 + 2) + reg1
elif reg0 == 3:
reg1 = r0_3_r1_n(reg1, reg7)
# if reg1 == 0:
# reg1 = reg7 * (reg7 + 3)
# elif reg1 == 1:
# reg1 = pow(reg7, 3) + 4 * pow(reg7, 2) + 6 * reg7 + 1
# elif reg1 == 2:
# reg1 = (pow(reg7, 4) + 5 * pow(reg7, 3) +
# 10 * pow(reg7, 2) + 10 * reg7 + 2)
# else:
# reg1 = r0_3_r1_n(reg1, reg7)
reg0 = reg1 + 1
if MODULUS:
reg0 = reg0 % MODULUS
reg1 = reg1 % MODULUS
return
# original code
if reg0 == 0:
reg0 = reg1 + 1
elif reg1 == 0:
reg0 -= 1
reg1 = reg7
sub178b(lvl)
else:
stack.append(reg0)
reg1 -= 1
sub178b(lvl)
reg1 = reg0
reg0 = stack.pop() - 1
sub178b(lvl)
if MODULUS:
reg0 = reg0 % MODULUS
reg1 = reg1 % MODULUS
if usecache and seen_at[inputs] == len(stack):
cache[inputs] = (reg0, reg1, step)
print("{} -> r0={}, r1={}".format(inputs, *cache[inputs]))
return
try:
sub178b()
except IterationError:
pass
return (reg0, reg1, step)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-r0", type=int, default=4)
parser.add_argument("-r1", type=int, default=1)
parser.add_argument("-r7", type=int, default=1)
parser.add_argument("-n", type=int, default=0)
parser.add_argument("-d", "--debug", action='store_true')
parser.add_argument("-s7", "--seq7")
parser.add_argument("-s1", "--seq1")
parser.add_argument("-x", "--shortcut", type=int, default=0)
parser.add_argument("-y", "--cache", action='store_true')
opt = parser.parse_args()
assert opt.shortcut < 4
logger.info("teleporter register confirmation")
if opt.debug:
logger.setLevel(logging.DEBUG)
if opt.seq7:
start, stop = [int(v) for v in opt.seq7.split(":")]
for reg7 in range(start,stop+1):
inputs = (opt.r0, opt.r1, reg7)
(reg0, reg1, step) = confirm(opt.r0, opt.r1, reg7, opt.n,
useshortcut=opt.shortcut, usecache=opt.cache)
logger.info("{:3d} steps, {} --> r0:{:2d} r1:{:2d} r7:{:2d}".format(
step, inputs, reg0, reg1, reg7))
elif opt.seq1:
start, stop = [int(v) for v in opt.seq1.split(":")]
for reg1 in range(start, stop+1):
inputs = (opt.r0, reg1, opt.r7)
(reg0, reg1, step) = confirm(opt.r0, reg1, opt.r7, opt.n,
useshortcut=opt.shortcut, usecache=opt.cache)
logger.info("{:3d} steps, {} --> r0:{:2d} r1:{:2d} r7:{:2d}".format(
step, inputs, reg0, reg1, opt.r7))
else:
inputs = (opt.r0, opt.r1, opt.r7)
(reg0, reg1, step) = confirm(opt.r0, opt.r1, opt.r7, opt.n,
useshortcut=opt.shortcut, usecache=opt.cache)
logger.info("{:3d} steps, {} --> r0:{:2d} r1:{:2d} r7:{:2d}".format(
step, inputs, reg0, reg1, opt.r7))
|
StarcoderdataPython
|
3389869
|
<filename>src/sw/src/simulation/gen_image.py
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import argparse
#=====================================================================================================
# default values
VGA_HEIGHT = 480
VGA_WIDTH = 640
OUTPUT_IMG_WIDTH = 45
OUTPUT_IMG_HEIGHT = 185
SPACING = 50
NUM_IMAGES_TO_DECODE = 10
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_filename', help='input file name', required=True)
parser.add_argument('-o', '--output_filename', help='output file name', required=True)
parser.add_argument('-n', '--image_to_decode', help='number of input images to decode', required=False)
parser.add_argument('-w', '--image_width', help='image width', required=False)
parser.add_argument('-l', '--image_height', help='image height', required=False)
parser.add_argument('-s', '--spacing', help='spacing between images', required=False)
args = parser.parse_args()
return vars(args)
def load_output_txt(file, image_to_decode=NUM_IMAGES_TO_DECODE, image_height=OUTPUT_IMG_HEIGHT, image_width=OUTPUT_IMG_WIDTH):
with open(file) as f:
lines = f.readlines()
images = np.ones((image_width*image_height, image_to_decode))
# import ipdb as pdb; pdb.set_trace()
for idx, line in enumerate(lines):
line = line.replace("\n", "")
if line != "":  # skip blank lines
vals = list(line)
images[idx,:] = [int(val) for val in vals]
return images
def gen_vga_img(imgs, output_filename, image_to_decode=NUM_IMAGES_TO_DECODE, image_height=OUTPUT_IMG_HEIGHT, image_width=OUTPUT_IMG_WIDTH, spacing=SPACING):
out_img = np.zeros((VGA_HEIGHT, VGA_WIDTH))
# import ipdb as pdb; pdb.set_trace()
for i in range(0,image_to_decode):
image = imgs[:, i]
image = np.array(image)
image = image.reshape(image_height, image_width)
# import ipdb as pdb; pdb.set_trace()
indw = (i+1)*spacing
out_img[spacing:spacing+image_height, indw:indw+image_width] = image
plt.imshow(out_img, cmap='gray')
# import ipdb as pdb; pdb.set_trace()
plt.savefig(output_filename+".png")
if __name__ == '__main__':
args = parse_args()
input_filename = args['input_filename']
output_filename = args['output_filename']
image_to_decode = args['image_to_decode']
image_width = args['image_width']
image_height = args['image_height']
spacing = args['spacing']
if image_to_decode == None:
image_to_decode = NUM_IMAGES_TO_DECODE
else:
image_to_decode = int(image_to_decode)
if image_width == None:
image_width = OUTPUT_IMG_WIDTH
else:
image_width = int(image_width)
if image_height == None:
image_height = OUTPUT_IMG_HEIGHT
else:
image_height = int(image_height)
if spacing == None:
spacing = SPACING
else:
spacing = int(spacing)
images = load_output_txt(input_filename, image_to_decode, image_height, image_width)
gen_vga_img(images, output_filename, image_to_decode, image_height, image_width, spacing)
|
StarcoderdataPython
|
60649
|
<filename>configs/egohands/egohands_dataset.py
_base_ = '../_base_/datasets/coco_detection.py'
data_root = 'data/egohands/'
classes = ('myleft', 'myright', 'yourleft', 'yourright') # class labels
data = dict(
train=dict(
classes=classes, # override the COCO dataset classes
ann_file=data_root+'annotations/train.json',
img_prefix=data_root+'train/'),
val=dict(
classes=classes, # override the COCO dataset classes
ann_file=data_root+'annotations/valid.json',
img_prefix=data_root+'valid/'),
test=dict(
classes=classes, # override the COCO dataset classes
ann_file=data_root+'annotations/test.json',
img_prefix=data_root+'test/')
)
|
StarcoderdataPython
|
145043
|
<filename>ingest/importer/conversion/metadata_entity.py
import copy
from ingest.importer.data_node import DataNode
from ingest.importer.spreadsheet.ingest_worksheet import IngestRow
TYPE_UNDEFINED = 'undefined'
class MetadataEntity:
# TODO enforce definition of concrete and domain types for all MetadataEntity
# It's only currently done this way to minimise friction with other parts of the system
def __init__(self, concrete_type=TYPE_UNDEFINED, domain_type=TYPE_UNDEFINED, object_id=None,
content={}, links={}, external_links={}, linking_details={}, row: IngestRow = None,
is_reference=False, is_linking_reference=False, is_module=False):
self._concrete_type = concrete_type
self._domain_type = domain_type
self.object_id = object_id
self._content = DataNode(defaults=copy.deepcopy(content))
self._links = copy.deepcopy(links)
self._external_links = copy.deepcopy(external_links)
self._linking_details = DataNode(defaults=copy.deepcopy(linking_details))
self._spreadsheet_location = {
'row_index': row.index,
'worksheet_title': row.worksheet_title,
} if row else None
self._is_reference = is_reference
self._is_linking_reference = is_linking_reference
self._is_module = is_module
@property
def concrete_type(self):
return self._concrete_type
@property
def domain_type(self):
return self._domain_type
@property
def content(self):
return copy.deepcopy(self._content)
def get_content(self, content_property):
return self._content[content_property]
def define_content(self, content_property, value):
self._content[content_property] = value
def define_linking_detail(self, link_property, value):
self._linking_details[link_property] = value
@property
def linking_details(self):
return self._linking_details.as_dict()
def get_linking_detail(self, link_property):
return self._linking_details[link_property]
@property
def links(self):
return copy.deepcopy(self._links)
def get_links(self, link_entity_type):
return self._links.get(link_entity_type)
def add_links(self, link_entity_type, new_links):
self._do_add_links(self._links, link_entity_type, new_links)
@property
def is_reference(self):
return self._is_reference
@property
def is_linking_reference(self):
return self._is_linking_reference
@property
def is_module(self):
return self._is_module
@property
def external_links(self):
return copy.deepcopy(self._external_links)
def get_external_links(self, link_entity_type):
return self._external_links.get(link_entity_type)
def add_external_links(self, link_entity_type, new_links):
self._do_add_links(self._external_links, link_entity_type, new_links)
@staticmethod
def _do_add_links(link_map, link_entity_type, new_links):
existent_links = link_map.get(link_entity_type)
if existent_links is None:
existent_links = []
link_map[link_entity_type] = existent_links
existent_links.extend(new_links)
def retain_fields(self, *allowed_fields):
removed_fields = []
for key in self._content.keys():
if key not in allowed_fields:
removed_fields.append({'key': key, 'value': self._content[key]})
self._content.remove_field(key)
return removed_fields
def add_module_entity(self, module_entity):
for field, value in module_entity.content.as_dict().items():
module_list = self._content[field]
if not module_list:
module_list = []
self._content[field] = module_list
module_list.append(value)
def get_spreadsheet_location(self):
return self._spreadsheet_location
def map_for_submission(self):
return {
'is_reference': self.is_reference,
'is_linking_reference': self._is_linking_reference,
'concrete_type': self.concrete_type,
'content': self._content.as_dict(),
'links_by_entity': self.links,
'external_links_by_entity': self.external_links,
'linking_details': self.linking_details,
'spreadsheet_location': self._spreadsheet_location
}
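# Illustrative usage sketch (not part of the original module); the type names and
# field values below are hypothetical and only exercise the public API defined above.
if __name__ == '__main__':
    entity = MetadataEntity(concrete_type='donor_organism', domain_type='biomaterial')
    entity.define_content('biomaterial_id', 'specimen_1')
    entity.add_links('process', ['process_1', 'process_2'])
    print(entity.map_for_submission())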
|
StarcoderdataPython
|
3352809
|
<filename>memberaudit/tests/testdata/esi_test_tools/tests.py
from datetime import datetime
from bravado.exception import HTTPNotFound
from django.test import TestCase
from .main import EsiClientStub, EsiEndpoint
testdata = {
"Alpha": {
"get_cake": {"1": "cheesecake", "2": "strawberrycake"},
"get_details": {"1": {"appointment": "2015-03-24T11:37:00Z"}},
"get_secret": {"1": "blue secret", "2": "red secret"},
"get_simple": "steak",
"get_double_impact": {"1": {"A": "Madrid", "B": "Tokyo"}},
}
}
class TestEsiClientStub(TestCase):
def setUp(self) -> None:
self.stub = EsiClientStub(
testdata,
[
EsiEndpoint("Alpha", "get_cake", "cake_id"),
EsiEndpoint("Alpha", "get_secret", "secret_id", needs_token=True),
EsiEndpoint("Alpha", "get_simple"),
EsiEndpoint("Alpha", "get_double_impact", ("first_id", "second_id")),
],
)
def test_can_create_endpoint(self):
self.assertTrue(hasattr(self.stub, "Alpha"))
self.assertTrue(hasattr(self.stub.Alpha, "get_cake"))
self.assertEqual(self.stub.Alpha.get_cake(cake_id=1).results(), "cheesecake")
self.assertEqual(
self.stub.Alpha.get_cake(cake_id=2).results(), "strawberrycake"
)
self.assertEqual(self.stub.Alpha.get_simple().results(), "steak")
self.assertEqual(
self.stub.Alpha.get_double_impact(first_id="1", second_id="B").results(),
"Tokyo",
)
def test_raises_exception_on_wrong_pk(self):
with self.assertRaises(ValueError):
self.stub.Alpha.get_cake(fruit_id=1).results()
def test_raises_exception_on_missing_pk(self):
with self.assertRaises(ValueError):
self.stub.Alpha.get_cake().results()
def test_raises_exception_on_missing_data(self):
with self.assertRaises(HTTPNotFound):
self.stub.Alpha.get_cake(cake_id=3).results()
def test_raises_exception_on_missing_token_if_required(self):
with self.assertRaises(ValueError):
self.stub.Alpha.get_secret(secret_id=1).results()
def test_raises_exception_when_trying_to_refine_same_endpoint(self):
with self.assertRaises(ValueError):
EsiClientStub(
testdata,
[
EsiEndpoint("Alpha", "get_cake", "cake_id"),
EsiEndpoint("Alpha", "get_cake", "cake_id"),
],
)
def test_raises_exception_when_trying_to_refine_endpoint_without_data(self):
with self.assertRaises(ValueError):
EsiClientStub(
testdata,
[
EsiEndpoint("Alpha", "get_fruit_id", "fruit_id"),
],
)
def test_can_convert_datetimes(self):
stub = EsiClientStub(
testdata,
[
EsiEndpoint("Alpha", "get_details", "id"),
],
)
results = stub.Alpha.get_details(id=1).results()
self.assertIsInstance(results["appointment"], datetime)
|
StarcoderdataPython
|
3292587
|
from aerosandbox.dynamics.point_mass.common_point_mass import _DynamicsPointMassBaseClass
from aerosandbox.weights.mass_properties import MassProperties
import aerosandbox.numpy as np
from typing import Union, Dict, Tuple
class DynamicsPointMass3DSpeedGammaTrack(_DynamicsPointMassBaseClass):
"""
Dynamics instance:
* simulating a point mass
* in 3D
* with velocity parameterized in speed-gamma-track space
State variables:
x_e: x-position, in Earth axes. [meters]
y_e: y-position, in Earth axes. [meters]
z_e: z-position, in Earth axes. [meters]
speed: Speed; equivalent to u_w, the x-velocity in wind axes. [m/s]
gamma: Flight path angle. [radians]
track: Track angle. [radians]
* Track of 0 == North == aligned with x_e axis
* Track of np.pi / 2 == East == aligned with y_e axis
Indirect control variables:
alpha: Angle of attack. [degrees]
beta: Sideslip angle. [degrees]
bank: Bank angle. [radians]
Control variables:
Fx_w: Force along the wind-x axis. [N]
Fy_w: Force along the wind-y axis. [N]
Fz_w: Force along the wind-z axis. [N]
"""
def __init__(self,
mass_props: MassProperties = None,
x_e: Union[np.ndarray, float] = 0,
y_e: Union[np.ndarray, float] = 0,
z_e: Union[np.ndarray, float] = 0,
speed: Union[np.ndarray, float] = 0,
gamma: Union[np.ndarray, float] = 0,
track: Union[np.ndarray, float] = 0,
alpha: Union[np.ndarray, float] = 0,
beta: Union[np.ndarray, float] = 0,
bank: Union[np.ndarray, float] = 0,
):
# Initialize state variables
self.mass_props = MassProperties() if mass_props is None else mass_props
self.x_e = x_e
self.y_e = y_e
self.z_e = z_e
self.speed = speed
self.gamma = gamma
self.track = track
# Initialize indirect control variables
self.alpha = alpha
self.beta = beta
self.bank = bank
# Initialize control variables
self.Fx_w = 0
self.Fy_w = 0
self.Fz_w = 0
@property
def state(self) -> Dict[str, Union[float, np.ndarray]]:
return {
"x_e" : self.x_e,
"y_e" : self.y_e,
"z_e" : self.z_e,
"speed": self.speed,
"gamma": self.gamma,
"track": self.track,
}
@property
def control_variables(self) -> Dict[str, Union[float, np.ndarray]]:
return {
"alpha": self.alpha,
"beta" : self.beta,
"bank" : self.bank,
"Fx_w" : self.Fx_w,
"Fy_w" : self.Fy_w,
"Fz_w" : self.Fz_w,
}
def state_derivatives(self) -> Dict[str, Union[float, np.ndarray]]:
d_speed = self.Fx_w / self.mass_props.mass
sb = np.sin(self.bank)
cb = np.cos(self.bank)
force_gamma_direction = -cb * self.Fz_w - sb * self.Fy_w # Force in the direction that acts to increase gamma
force_track_direction = -sb * self.Fz_w + cb * self.Fy_w # Force in the direction that acts to increase track
d_gamma = force_gamma_direction / self.mass_props.mass / self.speed
d_track = force_track_direction / self.mass_props.mass / self.speed / np.cos(self.gamma)
return {
"x_e" : self.u_e,
"y_e" : self.v_e,
"z_e" : self.w_e,
"speed": d_speed,
"gamma": d_gamma,
"track": d_track,
}
@property
def u_e(self):
return self.speed * np.cos(self.gamma) * np.cos(self.track)
@property
def v_e(self):
return self.speed * np.cos(self.gamma) * np.sin(self.track)
@property
def w_e(self):
return -self.speed * np.sin(self.gamma)
def convert_axes(self,
x_from: float,
y_from: float,
z_from: float,
from_axes: str,
to_axes: str,
) -> Tuple[float, float, float]:
if from_axes == to_axes:
return x_from, y_from, z_from
if (from_axes == "earth" or to_axes == "earth"):
rot_w_to_e = np.rotation_matrix_from_euler_angles(
roll_angle=self.bank,
pitch_angle=self.gamma,
yaw_angle=self.track,
as_array=False
)
if from_axes == "wind":
x_w = x_from
y_w = y_from
z_w = z_from
elif from_axes == "earth":
x_w = rot_w_to_e[0][0] * x_from + rot_w_to_e[1][0] * y_from + rot_w_to_e[2][0] * z_from
y_w = rot_w_to_e[0][1] * x_from + rot_w_to_e[1][1] * y_from + rot_w_to_e[2][1] * z_from
z_w = rot_w_to_e[0][2] * x_from + rot_w_to_e[1][2] * y_from + rot_w_to_e[2][2] * z_from
else:
x_w, y_w, z_w = self.op_point.convert_axes(
x_from, y_from, z_from,
from_axes=from_axes, to_axes="wind"
)
if to_axes == "wind":
x_to = x_w
y_to = y_w
z_to = z_w
elif to_axes == "earth":
x_to = rot_w_to_e[0][0] * x_w + rot_w_to_e[0][1] * y_w + rot_w_to_e[0][2] * z_w
y_to = rot_w_to_e[1][0] * x_w + rot_w_to_e[1][1] * y_w + rot_w_to_e[1][2] * z_w
z_to = rot_w_to_e[2][0] * x_w + rot_w_to_e[2][1] * y_w + rot_w_to_e[2][2] * z_w
else:
x_to, y_to, z_to = self.op_point.convert_axes(
x_w, y_w, z_w,
from_axes="wind", to_axes=to_axes
)
return x_to, y_to, z_to
def add_force(self,
Fx: Union[np.ndarray, float] = 0,
Fy: Union[np.ndarray, float] = 0,
Fz: Union[np.ndarray, float] = 0,
axes="wind",
) -> None:
Fx_w, Fy_w, Fz_w = self.convert_axes(
x_from=Fx,
y_from=Fy,
z_from=Fz,
from_axes=axes,
to_axes="wind"
)
self.Fx_w = self.Fx_w + Fx_w
self.Fy_w = self.Fy_w + Fy_w
self.Fz_w = self.Fz_w + Fz_w
if __name__ == '__main__':
dyn = DynamicsPointMass3DSpeedGammaTrack()
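# Illustrative sketch (hypothetical values, not part of the original module): give
# the point mass some speed, apply a force in wind axes, and inspect the state
# derivatives. Note that at speed == 0 the gamma/track derivatives divide by zero.
#     dyn.mass_props = MassProperties(mass=1.0)
#     dyn.speed = 10.0
#     dyn.add_force(Fx=-1.0, Fz=-9.81, axes="wind")
#     print(dyn.state_derivatives())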
|
StarcoderdataPython
|
3394957
|
<reponame>cajomferro/marine-robotics-pacific
#!/usr/bin/python
from pacific import ms5837
from dataclasses import dataclass
@dataclass
class Pressure:
sensor = None
BAR_CONST: float = 0.0689475729
def read(self):
"""
Read temperature (ºC) and pressure (bar)
"""
# We have to read values from sensor to update pressure and temperature
if not self.sensor.read():
raise Exception("Pressure sensor read failed!")
pressure_bar = self.sensor.pressure(ms5837.UNITS_psi) * self.BAR_CONST
temp_celsius = self.sensor.temperature(ms5837.UNITS_Centigrade)
return pressure_bar, temp_celsius
def __post_init__(self):
self.sensor = ms5837.MS5837_30BA() # Default I2C bus is 1 (Raspberry Pi 3)
# We must initialize the sensor before reading it
if not self.sensor.init():
raise Exception("Pressure sensor could not be initialized")
|
StarcoderdataPython
|
3351760
|
class WrappaText:
def __init__(self, text):
self._text = text
@property
def text(self):
return self._text
|
StarcoderdataPython
|
1659708
|
# Import the required modules
import json
import requests
import time
import urllib
import logging
import signal
import sys
# Variables for receiving and sending data via the Telegram API and the OpenWeather API
TOKEN = "<KEY>"
OWM_KEY = "<KEY>"
POLLING_TIMEOUT = None
# Helper functions for parsing updates from the Telegram API
def getText(update): return update["message"]["text"]
def getLocation(update): return update["message"]["location"]
def getChatId(update): return update["message"]["chat"]["id"]
def getUpId(update): return int(update["update_id"])
def getResult(updates): return updates["result"]
# Helper functions for parsing weather responses from the OpenWeather API
def getDesc(w): return w["weather"][0]["description"]
def getTemp(w): return w["main"]["temp"]
def getCity(w): return w["name"]
# Create the logger and set its level to DEBUG
logger = logging.getLogger("weather-telegram")
logger.setLevel(logging.DEBUG)
# Cities offered for weather requests, i.e. the cities shown in the bot's menu
cities = ["Москва", "Королев", "Ташкент", "Коканд"]
def sigHandler(signal, frame):
logger.info("Сигнал входа Получен. Завершения скрипта... Пока-Пока")
sys.exit(0)
# Configure file and console logging
def configLogging():
# Create a file log handler and set its level to DEBUG
# Mode = write -> clears the existing log file
handler = logging.FileHandler("run.log", mode="w")
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
# Create a console handler and set its level to INFO
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter("[%(levelname)s] - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
def parseConfig():
global URL, URL_OWM, POLLING_TIMEOUT
URL = "https://api.telegram.org/bot{}/".format(TOKEN)
URL_OWM = "http://api.openweathermap.org/data/2.5/weather?appid={}&units=metric&lang=ru".format(OWM_KEY)
POLLING_TIMEOUT
# Send a request to the Telegram bot API and return the response as UTF-8 decoded JSON
def makeRequest(url):
logger.debug("URL: %s" % url)
r = requests.get(url)
resp = json.loads(r.content.decode("utf8"))
return resp
# Return all updates with an id greater than the given offset
# (Telegram stores the list of updates for 24 hours)
def getUpdates(offset=None):
url = URL + "getUpdates?timeout=%s" % POLLING_TIMEOUT
logger.info("Получение обновлений")
if offset:
url += "&offset={}".format(offset)
js = makeRequest(url)
return js
# Build a virtual keyboard for the Telegram bot's on-screen options
def buildKeyboard(items):
keyboard = [[{"text":item}] for item in items]
replyKeyboard = {"keyboard":keyboard, "one_time_keyboard": True}
logger.debug(replyKeyboard)
return json.dumps(replyKeyboard)
def buildCitiesKeyboard():
keyboard = [[{"text": c}] for c in cities]
keyboard.append([{"text": "Поделиться местоположением", "request_location": True}])
replyKeyboard = {"keyboard": keyboard, "one_time_keyboard": True}
logger.debug(replyKeyboard)
return json.dumps(replyKeyboard)
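# For example, with the cities list above, buildCitiesKeyboard() returns JSON
# equivalent to (reformatted here for readability):
#   {"keyboard": [[{"text": "Москва"}], [{"text": "Королев"}], [{"text": "Ташкент"}],
#                 [{"text": "Коканд"}],
#                 [{"text": "Поделиться местоположением", "request_location": true}]],
#    "one_time_keyboard": true}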
# Query the OpenWeather API for the weather at a place name or at coordinates received from the user
def getWeather(place):
if isinstance(place, dict): # coordinates were provided
lat, lon = place["latitude"], place["longitude"]
url = URL_OWM + "&lat=%f&lon=%f&cnt=1" % (lat, lon)
logger.info("Запрашиваю погоду: " + url)
js = makeRequest(url)
logger.debug(js)
return u"%s \N{DEGREE SIGN}C, %s в городе %s" % (getTemp(js), getDesc(js), getCity(js))
else: # a place name was provided, i.e. one of the cities
# send the request
url = URL_OWM + "&q={}".format(place)
logger.info("Запрашиваю погоду: " + url)
js = makeRequest(url)
logger.debug(js)
return u"%s \N{DEGREE SIGN}C, %s в городе %s" % (getTemp(js), getDesc(js), getCity(js))
# Send a message to the user's chat id
def sendMessage(text, chatId, interface=None):
text = text.encode('utf-8', 'strict')
text = urllib.parse.quote_plus(text)
url = URL + "sendMessage?text={}&chat_id={}&parse_mode=Markdown".format(text, chatId)
if interface:
url += "&reply_markup={}".format(interface)
requests.get(url)
# Get the id of the last available update
def getLastUpdateId(updates):
ids = []
for update in getResult(updates):
ids.append(getUpId(update))
return max(ids)
# Track the state of each user's dialogue: "weather requested"
chats = {}
# Callback function for handling incoming update messages
def handleUpdates(updates):
for update in getResult(updates):
chatId = getChatId(update)
try:
text = getText(update)
except Exception as e:
logger.error("На текстовом поле не указан местоположение для обновления данных.Попытайтемся узнать местоположение")
loc = getLocation(update)
# Was the weather requested earlier?
if (chatId in chats) and (chats[chatId] == "weatherReq"):
logger.info("Погода, запрошенная для %s в чате id %d" % (str(loc), chatId))
# Send the weather to the user's chat id and clear the state
sendMessage(getWeather(loc), chatId)
del chats[chatId]
continue
if text == "/pogoda":
keyboard = buildCitiesKeyboard()
chats[chatId] = "weatherReq"
sendMessage("Выберите город:", chatId, keyboard)
elif text == "/start":
sendMessage("Аксиома Кана: Когда все остальное терпит неудачу, прочитайте инструкции", chatId)
elif text.startswith("/"):
logger.warning("Неверная команда %s" % text)
continue
elif (text in cities) and (chatId in chats) and (chats[chatId] == "weatherReq"):
logger.info("Weather requested for %s" % text)
# Send weather to chat id and clear state
sendMessage(getWeather(text), chatId)
del chats[chatId]
else:
keyboard = buildKeyboard(["/pogoda"])
sendMessage("Я каждый день узнаю что-то новое, но пока вы можете спросить меня о погоде.", chatId, keyboard)
def main():
# Configure file and console log handlers
configLogging()
# Get the API tokens and keys
parseConfig()
# Intercept Ctrl-C SIGINT
signal.signal(signal.SIGINT, sigHandler)
# Main polling loop
last_update_id = None
while True:
updates = getUpdates(last_update_id)
if len(getResult(updates)) > 0:
last_update_id = getLastUpdateId(updates) + 1
handleUpdates(updates)
time.sleep(0.5)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
182070
|
from datetime import datetime, timezone
from roll import AbilityRoll, DamageRoll, DiceRoll, SimpleRoll, parse_roll_content
import re
# TODO: refactor classes into some kind of parent-child relationship once we've established a reasonable
# inheritance model
def parse_attack_name(content):
attack_name_patterns = (
'\{\{rname=\[(.+?)\]',
'\{\{rname=(.+?)\}\}'
)
# format the list of possible patterns as alternations, with each option enclosed in a non-capturing group
search_pattern = re.compile('(?:' + ')|(?:'.join(attack_name_patterns) + ')')
match = re.search(search_pattern, content)
# only access the match groups when a match was actually found
if match:
return next((x for x in match.groups() if x is not None), None)
else:
return ''
def parse_ability_check_name(content):
ability_name_patterns = (
'\{\{rname=\^\{(.*?)\}\}\}',
'\{\{rname=(.+?)\}\}'
)
search_pattern = re.compile('(?:' + ')|(?:'.join(ability_name_patterns) + ')')
match = re.search(search_pattern, content)
# only access the match groups when a match was actually found
if match:
return next((x for x in match.groups() if x is not None), None)
else:
return ''
class Message:
def __init__(self, player_name, avatar, timestamp, content):
self.player_name = player_name
self.avatar = avatar
# timestamps from the raw data are in milliseconds instead of seconds
self.timestamp = datetime.fromtimestamp(timestamp/1000, tz=timezone.utc)
self.content = content
def __repr__(self):
return 'Message(player_name=%r, avatar=%r, timestamp=%r, content=%r)' % (self.player_name,
self.avatar, self.timestamp, self.content)
def pretty_print(self):
return '(%s) %s: %s' % (self.timestamp.ctime(), self.player_name, self.content)
class RollMessage:
def __init__(self, player_name, avatar, timestamp, content, original_roll):
self.player_name = player_name
self.avatar = avatar
# timestamps from the raw data are in milliseconds instead of seconds
self.timestamp = datetime.fromtimestamp(timestamp/1000, tz=timezone.utc)
self.original_roll = original_roll
self.content = content
self.roll_result = parse_roll_content(original_roll, content)
def __repr__(self):
return 'RollMessage(player_name=%r, avatar=%r, timestamp=%r, content=%r)' % (
self.player_name, self.avatar, self.timestamp, self.content)
def pretty_print(self):
return '(%s) %s: %s' % (self.timestamp.ctime(), self.player_name,
self.roll_result.pretty_print())
class AttackRollMessage:
def __init__(self, player_name, avatar, timestamp, content, rolls):
self.player_name = player_name
self.avatar = avatar
# timestamps from the raw data are in milliseconds instead of seconds
self.timestamp = datetime.fromtimestamp(timestamp/1000, tz=timezone.utc)
self.content = content
self.rolls = rolls
self.roll_result = AbilityRoll(parse_attack_name(content),
[SimpleRoll(roll.get('expression'), roll['results'].get('total')) for roll in rolls])
def __repr__(self):
return 'AttackRollMessage(player_name=%r, avatar=%r, timestamp=%r, content=%r, rolls=%r)' % (
self.player_name, self.avatar, self.timestamp, self.content, self.rolls)
def pretty_print(self):
return '(%s) %s: %s' % (self.timestamp.ctime(), self.player_name,
self.roll_result.pretty_print())
class DamageRollMessage:
def __init__(self, player_name, avatar, timestamp, content, rolls):
self.player_name = player_name
self.avatar = avatar
# timestamps from the raw data are in milliseconds instead of seconds
self.timestamp = datetime.fromtimestamp(timestamp/1000, tz=timezone.utc)
self.content = content
self.rolls = rolls
self.roll_result = DamageRoll(parse_attack_name(content),
[SimpleRoll(roll.get('expression'), roll['results'].get('total')) for roll in rolls])
def __repr__(self):
return 'DamageRollMessage(player_name=%r, avatar=%r, timestamp=%r, content=%r, rolls=%r)' % (
self.player_name, self.avatar, self.timestamp, self.content, self.rolls)
def pretty_print(self):
return '(%s) %s: %s' % (self.timestamp.ctime(), self.player_name,
self.roll_result.pretty_print())
"""
Ability checks, like Intimidation, Persuasion, Insight etc.
"""
class AbilityCheckMessage:
def __init__(self, player_name, avatar, timestamp, content, rolls):
self.player_name = player_name
self.avatar = avatar
# timestamps from the raw data are in milliseconds instead of seconds
self.timestamp = datetime.fromtimestamp(timestamp/1000, tz=timezone.utc)
self.content = content
self.roll_result = AbilityRoll(parse_ability_check_name(content),
[SimpleRoll(roll.get('expression'), roll['results'].get('total')) for roll in rolls])
def __repr__(self):
return 'AbilityCheckMessage(player_name=%r, avatar=%r, timestamp=%r, content=%r)' % (
self.player_name, self.avatar, self.timestamp, self.content)
def pretty_print(self):
return '(%s) %s rolled %s' % (self.timestamp.ctime(), self.player_name, self.roll_result.pretty_print())
"""
Spells and additional features like sentinel, battlemaster manoeuvres
"""
class AbilityMessage:
def __init__(self, player_name, avatar, timestamp, content):
self.player_name = player_name
self.avatar = avatar
# timestamps from the raw data are in milliseconds instead of seconds
self.timestamp = datetime.fromtimestamp(timestamp/1000, tz=timezone.utc)
self.content = content
ability_name_pattern = re.compile('\{\{name=(.*?)\}\}')
ability_name_match = re.search(ability_name_pattern, content)
self.ability_name = ability_name_match.group(1).strip() if ability_name_match else ''
def __repr__(self):
return 'AbilityMessage(player_name=%r, avatar=%r, timestamp=%r, content=%r)' % (
self.player_name, self.avatar, self.timestamp, self.content)
def pretty_print(self):
return '(%s) %s | %s' % (self.timestamp.ctime(), self.player_name, self.ability_name)
""" Parse rolls with the 'atkdmg' template, which combines
attack rolls and damage rolls into a single roll
"""
def parse_attackdamage_message(message):
# assume the first 2 rolls are attack rolls, followed by a bunch of damage rolls
attack_rolls = message['inlinerolls'][0:2]
# skip crits - roll20's crit roller usually fails to take extra damage like sneak attack and
# hunter's mark into account, so all the dice end up being rerolled manually anyway
damage_rolls = [roll for roll in message['inlinerolls'][2:] if \
'CRIT' not in roll['expression'] and roll['expression'] != '0']
attack_message = AttackRollMessage(message['who'], message['avatar'], message['.priority'],
message['content'], attack_rolls)
if len(damage_rolls) > 0:
return (attack_message, DamageRollMessage(message['who'], message['avatar'], message['.priority'],
message['content'], damage_rolls))
else:
return (attack_message, )
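# Illustrative only (hypothetical values): a minimal message dict of the shape this
# parser expects; real Roll20 exports contain many more fields.
#   {
#       "who": "Alice", "avatar": "", ".priority": 1600000000000,
#       "content": "{{rname=[Dagger]}} ...",
#       "inlinerolls": [
#           {"expression": "1d20+5", "results": {"total": 17}},
#           {"expression": "1d20+5", "results": {"total": 9}},
#           {"expression": "1d4+3", "results": {"total": 6}},
#       ],
#   }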
|
StarcoderdataPython
|
26634
|
import numpy as np
import pandas as pd
import seaborn as sns
from nninst.backend.tensorflow.model import AlexNet
from nninst.backend.tensorflow.trace.alexnet_imagenet_inter_class_similarity import (
alexnet_imagenet_inter_class_similarity_frequency,
)
from nninst.op import Conv2dOp, DenseOp
np.random.seed(0)
sns.set()
threshold = 0.5
frequency = int(500 * 0.1)
label = "import"
variant = None
base_name = f"alexnet_imagenet_inter_class_similarity_frequency_{frequency}"
cmap = "Greens"
same_class_similarity = []
diff_class_similarity = []
layer_names = []
layers = AlexNet.graph().load().ops_in_layers(Conv2dOp, DenseOp)
for layer_name in [
None,
*layers,
]:
similarity = alexnet_imagenet_inter_class_similarity_frequency(
threshold, frequency, label, variant=variant, layer_name=layer_name
).load()
same_class_similarity.append(
np.mean(similarity[np.eye(similarity.shape[0], dtype=bool)])
)
diff_class_similarity.append(
np.mean(
similarity[
np.tri(similarity.shape[0], similarity.shape[1], k=-1, dtype=bool)
]
)
)
if layer_name is None:
file_name = base_name
layer_names.append("All")
else:
file_name = base_name + "_" + layer_name[: layer_name.index("/")]
layer_names.append(layer_name[: layer_name.index("/")])
plot_array = np.around(similarity, decimals=2)
ax = sns.heatmap(plot_array, cmap=cmap, vmax=plot_array.max(), annot=True)
ax.set(xlabel="Class", ylabel="Class")
fig = ax.get_figure()
# fig.savefig(f"{file_name}.pdf", bbox_inches="tight")
fig.savefig(f"{file_name}.png", bbox_inches="tight")
# np.savetxt(f"{file_name}.csv", similarity, delimiter=",")
fig.clf()
for layer_name, similarity in zip(
["avg", "first_half", "second_half"],
[
np.mean(
[
alexnet_imagenet_inter_class_similarity_frequency(
threshold, frequency, label, variant=variant, layer_name=layer
).load()
for layer in layers
],
axis=0,
),
# np.mean([alexnet_imagenet_inter_class_similarity_frequency(
# threshold, frequency, label, variant=variant, layer_name=layer
# ).load()
# for layer in layers[:len(layers) // 2]], axis=0),
# np.mean([alexnet_imagenet_inter_class_similarity_frequency(
# threshold, frequency, label, variant=variant, layer_name=layer
# ).load()
# for layer in layers[len(layers) // 2:]], axis=0),
],
):
file_name = base_name + "_" + layer_name
plot_array = np.around(similarity, decimals=2)
ax = sns.heatmap(plot_array, cmap=cmap, vmax=plot_array.max(), annot=True)
ax.set(xlabel="Class", ylabel="Class")
fig = ax.get_figure()
# fig.savefig(f"{file_name}.pdf", bbox_inches="tight")
fig.savefig(f"{file_name}.png", bbox_inches="tight")
# np.savetxt(f"{file_name}.csv", similarity, delimiter=",")
fig.clf()
summary_df = pd.DataFrame(
{
"Same Class": same_class_similarity,
"Diff Class": diff_class_similarity,
"Layer": layer_names,
}
)
summary_df.to_csv(f"{base_name}_summary.csv", index=False)
|
StarcoderdataPython
|
52521
|
<reponame>gentildf/Python
# Write a Python program that opens and plays the audio of an MP3 file.
import pygame
print('\033[1mPlayer de música')
pygame.mixer.init()
pygame.mixer.music.load("desafio023.mp3")
pygame.mixer.music.play()
while pygame.mixer.music.get_busy(): pass
|
StarcoderdataPython
|
4808504
|
from django.core.exceptions import PermissionDenied
from django.http.response import Http404, HttpResponseForbidden, HttpResponseNotAllowed
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets, generics
from rest_framework import permissions as rest_permissions
from rest_framework.authentication import SessionAuthentication
from . import models
from . import serializers
from . import api_permissions
class DebitLogUpdateView(generics.UpdateAPIView):
queryset = models.DebitLog.objects.all() #! filter by logged in user
lookup_url_kwarg = 'debitlog_pk'
serializer_class = serializers.DebitLogSerializer
authentication_classes = [SessionAuthentication, ]
permission_classes = [rest_permissions.IsAuthenticated, api_permissions.CheckLogModelsPermission]
class PaidLogViewSet(viewsets.ModelViewSet):
"""
Created a full Viewset 'cause it is useful in near future
or you can simply use the UpdateApiView.
"""
# * better approach
# inseted of custmer id as url param use debit id
# get the customer name as data and search for name in debit customer_set if not create new
# else create new debit
model = models.PaidLog
serializer_class = serializers.PaidLogSerializer
authentication_classes = [SessionAuthentication, ]
permission_classes = [api_permissions.CheckLogModelsPermission, rest_permissions.IsAuthenticated, ]
def get_queryset(self):
customer_pk = self.kwargs.get('customer_pk')
customer = models.Customer.objects.get(pk = customer_pk)
return customer.paidlog_set.all()
def create(self, request, *args, **kwargs):
try:
customer = models.Customer.objects.get(pk = kwargs['customer_pk'])
if customer.debit.user != request.user:
raise PermissionDenied
request.data.update({ 'customer': customer.id })
except models.Customer.DoesNotExist:
raise Http404
return super().create(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
try:
customer = models.Customer.objects.get(pk = kwargs['customer_pk'])
if customer.debit.user != request.user:
raise PermissionDenied
request.data.update({ 'customer': customer.id })
except models.Customer.DoesNotExist:
raise Http404
return super().update(request, *args, **kwargs)
class SearchCustomerAPIView(generics.GenericAPIView):
model = models.Customer
serializer_class = serializers.CustomerSerializer
authentication_classes = [SessionAuthentication, ]
permission_classes = [rest_permissions.IsAuthenticated, api_permissions.CheckDebitPermission]
def get_debit(self, pk):
try:
return self.request.user.debit_set.get(pk = pk)
except models.Debit.DoesNotExist:
raise Http404()
def get(self, request, *args, **kwargs):
name = request.GET.get('name', None)
debit = self.get_debit(kwargs.get('debit_pk'))
if name:
queryset = self.model.objects.filter(name__startswith = name, debit = debit)[:10]
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
return Response({'error': 'You need to pass name as params!'}, status = status.HTTP_400_BAD_REQUEST)
|
StarcoderdataPython
|
1757031
|
from nose.tools import set_trace
import logging
import sys
import os
import base64
import random
import uuid
import json
import re
import urllib
import urlparse
import flask
from flask import (
Response,
redirect,
)
from flask.ext.babel import lazy_gettext as _
from sqlalchemy.exc import ProgrammingError
from PIL import Image
from StringIO import StringIO
import feedparser
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
from core.model import (
create,
get_one,
get_one_or_create,
Admin,
CirculationEvent,
Classification,
Collection,
Complaint,
ConfigurationSetting,
DataSource,
Edition,
ExternalIntegration,
Genre,
Hold,
Hyperlink,
Identifier,
Library,
LicensePool,
Loan,
Patron,
PresentationCalculationPolicy,
Representation,
Subject,
Work,
WorkGenre,
)
from core.util.problem_detail import (
ProblemDetail,
JSON_MEDIA_TYPE as PROBLEM_DETAIL_JSON_MEDIA_TYPE,
)
from core.util.http import HTTP
from problem_details import *
from api.config import (
Configuration,
CannotLoadConfiguration
)
from google_oauth_admin_authentication_provider import GoogleOAuthAdminAuthenticationProvider
from password_admin_authentication_provider import PasswordAdminAuthenticationProvider
from api.controller import CirculationManagerController
from api.coverage import MetadataWranglerCoverageProvider
from core.app_server import entry_response
from core.app_server import (
entry_response,
feed_response,
load_pagination_from_request
)
from core.opds import AcquisitionFeed
from opds import AdminAnnotator, AdminFeed
from collections import Counter
from core.classifier import (
genres,
SimplifiedGenreClassifier,
NO_NUMBER,
NO_VALUE
)
from datetime import datetime, timedelta
from sqlalchemy.sql import func
from sqlalchemy.sql.expression import desc, nullslast, or_, and_, distinct, select, join
from sqlalchemy.orm import lazyload
from templates import admin as admin_template
from api.authenticator import AuthenticationProvider
from api.simple_authentication import SimpleAuthenticationProvider
from api.millenium_patron import MilleniumPatronAPI
from api.sip import SIP2AuthenticationProvider
from api.firstbook import FirstBookAuthenticationAPI
from api.clever import CleverAuthenticationAPI
from core.opds_import import OPDSImporter
from api.opds_for_distributors import OPDSForDistributorsAPI
from api.overdrive import OverdriveAPI
from api.bibliotheca import BibliothecaAPI
from api.axis import Axis360API
from api.oneclick import OneClickAPI
from api.enki import EnkiAPI
from api.nyt import NYTBestSellerAPI
from api.novelist import NoveListAPI
from core.opds_import import MetadataWranglerOPDSLookup
from api.google_analytics_provider import GoogleAnalyticsProvider
from core.local_analytics_provider import LocalAnalyticsProvider
from api.adobe_vendor_id import AuthdataUtility
from core.external_search import ExternalSearchIndex
def setup_admin_controllers(manager):
"""Set up all the controllers that will be used by the admin parts of the web app."""
if not manager.testing:
try:
manager.config = Configuration.load(manager._db)
except CannotLoadConfiguration, e:
logging.error("Could not load configuration file: %s", e)
sys.exit()
manager.admin_view_controller = ViewController(manager)
manager.admin_sign_in_controller = SignInController(manager)
manager.admin_work_controller = WorkController(manager)
manager.admin_feed_controller = FeedController(manager)
manager.admin_dashboard_controller = DashboardController(manager)
manager.admin_settings_controller = SettingsController(manager)
class AdminController(object):
def __init__(self, manager):
self.manager = manager
self._db = self.manager._db
self.url_for = self.manager.url_for
self.cdn_url_for = self.manager.cdn_url_for
@property
def auth(self):
auth_service = ExternalIntegration.admin_authentication(self._db)
if auth_service and auth_service.protocol == ExternalIntegration.GOOGLE_OAUTH:
return GoogleOAuthAdminAuthenticationProvider(
auth_service,
self.url_for('google_auth_callback'),
test_mode=self.manager.testing,
)
elif Admin.with_password(self._db).count() != 0:
return PasswordAdminAuthenticationProvider(
auth_service,
)
return None
def authenticated_admin_from_request(self):
"""Returns an authenticated admin or a problem detail."""
if not self.auth:
return ADMIN_AUTH_NOT_CONFIGURED
email = flask.session.get("admin_email")
if email:
admin = get_one(self._db, Admin, email=email)
if admin and self.auth.active_credentials(admin):
return admin
return INVALID_ADMIN_CREDENTIALS
def authenticated_admin(self, admin_details):
"""Creates or updates an admin with the given details"""
admin, ignore = get_one_or_create(
self._db, Admin, email=admin_details['email']
)
admin.update_credentials(
self._db,
credential=admin_details.get('credentials'),
)
# Set up the admin's flask session.
flask.session["admin_email"] = admin_details.get("email")
# A permanent session expires after a fixed time, rather than
# when the user closes the browser.
flask.session.permanent = True
# If this is the first time an admin has been authenticated,
# make sure there is a value set for the sitewide BASE_URL_KEY
# setting. If it's not set, set it to the hostname of the
# current request. This assumes the first authenticated admin
# is accessing the admin interface through the hostname they
# want to be used for the site itself.
base_url = ConfigurationSetting.sitewide(
self._db, Configuration.BASE_URL_KEY
)
if not base_url.value:
base_url.value = urlparse.urljoin(flask.request.url, '/')
return admin
def check_csrf_token(self):
"""Verifies that the CSRF token in the form data matches the one in the session."""
token = self.get_csrf_token()
if not token or token != flask.request.form.get("csrf_token"):
return INVALID_CSRF_TOKEN
return token
def get_csrf_token(self):
"""Returns the CSRF token for the current session."""
return flask.request.cookies.get("csrf_token")
def generate_csrf_token(self):
"""Generate a random CSRF token."""
return base64.b64encode(os.urandom(24))
class ViewController(AdminController):
def __call__(self, collection, book, path=None):
setting_up = (self.auth == None)
if not setting_up:
admin = self.authenticated_admin_from_request()
if isinstance(admin, ProblemDetail):
redirect_url = flask.request.url
if (collection):
quoted_collection = urllib.quote(collection)
redirect_url = redirect_url.replace(
quoted_collection,
quoted_collection.replace("/", "%2F"))
if (book):
quoted_book = urllib.quote(book)
redirect_url = redirect_url.replace(
quoted_book,
quoted_book.replace("/", "%2F"))
return redirect(self.url_for('admin_sign_in', redirect=redirect_url))
if not collection and not book and not path:
library = Library.default(self._db)
if library:
return redirect(self.url_for('admin_view', collection=library.short_name))
csrf_token = flask.request.cookies.get("csrf_token") or self.generate_csrf_token()
local_analytics = get_one(
self._db, ExternalIntegration,
protocol=LocalAnalyticsProvider.__module__,
goal=ExternalIntegration.ANALYTICS_GOAL)
show_circ_events_download = (local_analytics != None)
response = Response(flask.render_template_string(
admin_template,
csrf_token=csrf_token,
show_circ_events_download=show_circ_events_download,
setting_up=setting_up,
))
# The CSRF token is in its own cookie instead of the session cookie,
# because if your session expires and you log in again, you should
# be able to submit a form you already had open. The CSRF token lasts
# until the user closes the browser window.
response.set_cookie("csrf_token", csrf_token, httponly=True)
return response
class SignInController(AdminController):
ERROR_RESPONSE_TEMPLATE = """<!DOCTYPE HTML>
<html lang="en">
<head><meta charset="utf8"></head>
<body>
<p><strong>%(status_code)d ERROR:</strong> %(message)s</p>
</body>
</html>"""
PASSWORD_SIGN_IN_TEMPLATE = """<!DOCTYPE HTML>
<html lang="en">
<head><meta charset="utf8"></head>
<body>
<form action="%(password_sign_in_url)s" method="post">
<input type="hidden" name="redirect" value="%(redirect)s"/>
<label>Email <input type="text" name="email" /></label>
<label>Password <input type="password" name="password" /></label>
<button type="submit">Sign In</button>
</form>
</body>
</html>"""
def sign_in(self):
"""Redirects admin if they're signed in."""
if not self.auth:
return ADMIN_AUTH_NOT_CONFIGURED
admin = self.authenticated_admin_from_request()
if isinstance(admin, ProblemDetail):
redirect_url = flask.request.args.get("redirect")
return redirect(self.auth.auth_uri(redirect_url), Response=Response)
elif admin:
return redirect(flask.request.args.get("redirect"), Response=Response)
def redirect_after_google_sign_in(self):
"""Uses the Google OAuth client to determine admin details upon
callback. Barring error, redirects to the provided redirect URL."""
if not self.auth:
return ADMIN_AUTH_NOT_CONFIGURED
if not isinstance(self.auth, GoogleOAuthAdminAuthenticationProvider):
return ADMIN_AUTH_MECHANISM_NOT_CONFIGURED
admin_details, redirect_url = self.auth.callback(flask.request.args)
if isinstance(admin_details, ProblemDetail):
return self.error_response(admin_details)
if not self.staff_email(admin_details['email']):
return self.error_response(INVALID_ADMIN_CREDENTIALS)
else:
admin = self.authenticated_admin(admin_details)
return redirect(redirect_url, Response=Response)
def staff_email(self, email):
"""Checks the domain of an email address against the admin-authorized
domain"""
if not self.auth or not self.auth.domains:
return False
staff_domains = self.auth.domains
domain = email[email.index('@')+1:]
return domain.lower() in [staff_domain.lower() for staff_domain in staff_domains]
def password_sign_in(self):
if not self.auth:
return ADMIN_AUTH_NOT_CONFIGURED
if not isinstance(self.auth, PasswordAdminAuthenticationProvider):
return ADMIN_AUTH_MECHANISM_NOT_CONFIGURED
if flask.request.method == 'GET':
html = self.PASSWORD_SIGN_IN_TEMPLATE % dict(
password_sign_in_url=self.url_for("password_auth"),
redirect=flask.request.args.get("redirect"),
)
headers = dict()
headers['Content-Type'] = "text/html"
return Response(html, 200, headers)
admin_details, redirect_url = self.auth.sign_in(self._db, flask.request.form)
if isinstance(admin_details, ProblemDetail):
return self.error_response(INVALID_ADMIN_CREDENTIALS)
admin = self.authenticated_admin(admin_details)
return redirect(redirect_url, Response=Response)
def error_response(self, problem_detail):
"""Returns a problem detail as an HTML response"""
html = self.ERROR_RESPONSE_TEMPLATE % dict(
status_code=problem_detail.status_code,
message=problem_detail.detail
)
return Response(html, problem_detail.status_code)
class WorkController(CirculationManagerController):
STAFF_WEIGHT = 1
def details(self, identifier_type, identifier):
"""Return an OPDS entry with detailed information for admins.
This includes relevant links for editing the book.
"""
work = self.load_work(flask.request.library, identifier_type, identifier)
if isinstance(work, ProblemDetail):
return work
annotator = AdminAnnotator(self.circulation, flask.request.library)
return entry_response(
AcquisitionFeed.single_entry(self._db, work, annotator)
)
def complaints(self, identifier_type, identifier):
"""Return detailed complaint information for admins."""
work = self.load_work(flask.request.library, identifier_type, identifier)
if isinstance(work, ProblemDetail):
return work
counter = self._count_complaints_for_work(work)
response = dict({
"book": {
"identifier_type": identifier_type,
"identifier": identifier
},
"complaints": counter
})
return response
def edit(self, identifier_type, identifier):
"""Edit a work's metadata."""
work = self.load_work(flask.request.library, identifier_type, identifier)
if isinstance(work, ProblemDetail):
return work
changed = False
staff_data_source = DataSource.lookup(self._db, DataSource.LIBRARY_STAFF)
primary_identifier = work.presentation_edition.primary_identifier
staff_edition, is_new = get_one_or_create(
self._db, Edition,
primary_identifier_id=primary_identifier.id,
data_source_id=staff_data_source.id
)
self._db.expire(primary_identifier)
new_title = flask.request.form.get("title")
if new_title and work.title != new_title:
staff_edition.title = unicode(new_title)
changed = True
new_subtitle = flask.request.form.get("subtitle")
if work.subtitle != new_subtitle:
if work.subtitle and not new_subtitle:
new_subtitle = NO_VALUE
staff_edition.subtitle = unicode(new_subtitle)
changed = True
new_series = flask.request.form.get("series")
if work.series != new_series:
if work.series and not new_series:
new_series = NO_VALUE
staff_edition.series = unicode(new_series)
changed = True
new_series_position = flask.request.form.get("series_position")
if new_series_position:
try:
new_series_position = int(new_series_position)
except ValueError:
return INVALID_SERIES_POSITION
else:
new_series_position = None
if work.series_position != new_series_position:
if work.series_position and not new_series_position:
new_series_position = NO_NUMBER
staff_edition.series_position = new_series_position
changed = True
new_summary = flask.request.form.get("summary") or ""
if new_summary != work.summary_text:
old_summary = None
if work.summary and work.summary.data_source == staff_data_source:
old_summary = work.summary
work.presentation_edition.primary_identifier.add_link(
Hyperlink.DESCRIPTION, None,
staff_data_source, content=new_summary)
# Delete previous staff summary
if old_summary:
for link in old_summary.links:
self._db.delete(link)
self._db.delete(old_summary)
changed = True
if changed:
# Even if the presentation doesn't visibly change, we want
# to regenerate the OPDS entries and update the search
# index for the work, because that might be the 'real'
# problem the user is trying to fix.
policy = PresentationCalculationPolicy(
classify=True,
regenerate_opds_entries=True,
update_search_index=True,
choose_summary=True
)
work.calculate_presentation(policy=policy)
return Response("", 200)
def suppress(self, identifier_type, identifier):
"""Suppress the license pool associated with a book."""
# Turn source + identifier into a LicensePool
pools = self.load_licensepools(flask.request.library, identifier_type, identifier)
if isinstance(pools, ProblemDetail):
# Something went wrong.
return pools
# Assume that the Work is being suppressed from the catalog, and
# not just the LicensePool.
# TODO: Suppress individual LicensePools when it's not that deep.
for pool in pools:
pool.suppressed = True
return Response("", 200)
def unsuppress(self, identifier_type, identifier):
"""Unsuppress all license pools associated with a book.
TODO: This will need to be revisited when we distinguish
between complaints about a work and complaints about a
        LicensePool.
"""
# Turn source + identifier into a group of LicensePools
pools = self.load_licensepools(flask.request.library, identifier_type, identifier)
if isinstance(pools, ProblemDetail):
# Something went wrong.
return pools
# Unsuppress each pool.
for pool in pools:
pool.suppressed = False
return Response("", 200)
def refresh_metadata(self, identifier_type, identifier, provider=None):
"""Refresh the metadata for a book from the content server"""
if not provider:
provider = MetadataWranglerCoverageProvider(self._db)
work = self.load_work(flask.request.library, identifier_type, identifier)
if isinstance(work, ProblemDetail):
return work
identifier = work.presentation_edition.primary_identifier
try:
record = provider.ensure_coverage(identifier, force=True)
except Exception:
# The coverage provider may raise an HTTPIntegrationException.
return REMOTE_INTEGRATION_FAILED
if record.exception:
# There was a coverage failure.
if (isinstance(record.exception, int)
and record.exception in [201, 202]):
                # A 201/202 means the metadata service has never looked up this
                # work before, so it has started the resolution process or is
                # still looking for sources.
return METADATA_REFRESH_PENDING
# Otherwise, it just doesn't know anything.
return METADATA_REFRESH_FAILURE
return Response("", 200)
def resolve_complaints(self, identifier_type, identifier):
"""Resolve all complaints for a particular license pool and complaint type."""
work = self.load_work(flask.request.library, identifier_type, identifier)
if isinstance(work, ProblemDetail):
return work
resolved = False
found = False
requested_type = flask.request.form.get("type")
if requested_type:
for complaint in work.complaints:
if complaint.type == requested_type:
found = True
                    if complaint.resolved is None:
complaint.resolve()
resolved = True
if not found:
return UNRECOGNIZED_COMPLAINT
elif not resolved:
return COMPLAINT_ALREADY_RESOLVED
return Response("", 200)
def classifications(self, identifier_type, identifier):
"""Return list of this work's classifications."""
work = self.load_work(flask.request.library, identifier_type, identifier)
if isinstance(work, ProblemDetail):
return work
identifier_id = work.presentation_edition.primary_identifier.id
results = self._db \
.query(Classification) \
.join(Subject) \
.join(DataSource) \
.filter(Classification.identifier_id == identifier_id) \
.order_by(Classification.weight.desc()) \
.all()
data = []
for result in results:
data.append(dict({
"type": result.subject.type,
"name": result.subject.identifier,
"source": result.data_source.name,
"weight": result.weight
}))
return dict({
"book": {
"identifier_type": identifier_type,
"identifier": identifier
},
"classifications": data
})
def edit_classifications(self, identifier_type, identifier):
"""Edit a work's audience, target age, fiction status, and genres."""
work = self.load_work(flask.request.library, identifier_type, identifier)
if isinstance(work, ProblemDetail):
return work
staff_data_source = DataSource.lookup(self._db, DataSource.LIBRARY_STAFF)
# Previous staff classifications
primary_identifier = work.presentation_edition.primary_identifier
old_classifications = self._db \
.query(Classification) \
.join(Subject) \
.filter(
Classification.identifier == primary_identifier,
Classification.data_source == staff_data_source
)
old_genre_classifications = old_classifications \
.filter(Subject.genre_id != None)
old_staff_genres = [
c.subject.genre.name
for c in old_genre_classifications
if c.subject.genre
]
old_computed_genres = [
work_genre.genre.name
for work_genre in work.work_genres
]
# New genres should be compared to previously computed genres
new_genres = flask.request.form.getlist("genres")
genres_changed = sorted(new_genres) != sorted(old_computed_genres)
# Update audience
new_audience = flask.request.form.get("audience")
if new_audience != work.audience:
# Delete all previous staff audience classifications
for c in old_classifications:
if c.subject.type == Subject.FREEFORM_AUDIENCE:
self._db.delete(c)
# Create a new classification with a high weight
primary_identifier.classify(
data_source=staff_data_source,
subject_type=Subject.FREEFORM_AUDIENCE,
subject_identifier=new_audience,
weight=WorkController.STAFF_WEIGHT,
)
# Update target age if present
new_target_age_min = flask.request.form.get("target_age_min")
new_target_age_min = int(new_target_age_min) if new_target_age_min else None
new_target_age_max = flask.request.form.get("target_age_max")
new_target_age_max = int(new_target_age_max) if new_target_age_max else None
        if (new_target_age_min is not None and new_target_age_max is not None
                and new_target_age_max < new_target_age_min):
return INVALID_EDIT.detailed(_("Minimum target age must be less than maximum target age."))
if work.target_age:
old_target_age_min = work.target_age.lower
old_target_age_max = work.target_age.upper
else:
old_target_age_min = None
old_target_age_max = None
if new_target_age_min != old_target_age_min or new_target_age_max != old_target_age_max:
# Delete all previous staff target age classifications
for c in old_classifications:
if c.subject.type == Subject.AGE_RANGE:
self._db.delete(c)
# Create a new classification with a high weight - higher than audience
if new_target_age_min and new_target_age_max:
age_range_identifier = "%s-%s" % (new_target_age_min, new_target_age_max)
primary_identifier.classify(
data_source=staff_data_source,
subject_type=Subject.AGE_RANGE,
subject_identifier=age_range_identifier,
weight=WorkController.STAFF_WEIGHT * 100,
)
# Update fiction status
# If fiction status hasn't changed but genres have changed,
# we still want to ensure that there's a staff classification
        new_fiction = flask.request.form.get("fiction") == "fiction"
if new_fiction != work.fiction or genres_changed:
# Delete previous staff fiction classifications
for c in old_classifications:
if c.subject.type == Subject.SIMPLIFIED_FICTION_STATUS:
self._db.delete(c)
# Create a new classification with a high weight (higher than genre)
fiction_term = "Fiction" if new_fiction else "Nonfiction"
classification = primary_identifier.classify(
data_source=staff_data_source,
subject_type=Subject.SIMPLIFIED_FICTION_STATUS,
subject_identifier=fiction_term,
weight=WorkController.STAFF_WEIGHT,
)
classification.subject.fiction = new_fiction
# Update genres
# make sure all new genres are legit
for name in new_genres:
genre, is_new = Genre.lookup(self._db, name)
if not isinstance(genre, Genre):
return GENRE_NOT_FOUND
if genres[name].is_fiction != new_fiction:
return INCOMPATIBLE_GENRE
if name == "Erotica" and new_audience != "Adults Only":
return EROTICA_FOR_ADULTS_ONLY
if genres_changed:
# delete existing staff classifications for genres that aren't being kept
for c in old_genre_classifications:
if c.subject.genre.name not in new_genres:
self._db.delete(c)
# add new staff classifications for new genres
for genre in new_genres:
if genre not in old_staff_genres:
classification = primary_identifier.classify(
data_source=staff_data_source,
subject_type=Subject.SIMPLIFIED_GENRE,
subject_identifier=genre,
weight=WorkController.STAFF_WEIGHT
)
# add NONE genre classification if we aren't keeping any genres
if len(new_genres) == 0:
primary_identifier.classify(
data_source=staff_data_source,
subject_type=Subject.SIMPLIFIED_GENRE,
subject_identifier=SimplifiedGenreClassifier.NONE,
weight=WorkController.STAFF_WEIGHT
)
else:
# otherwise delete existing NONE genre classification
none_classifications = self._db \
.query(Classification) \
.join(Subject) \
.filter(
Classification.identifier == primary_identifier,
Subject.identifier == SimplifiedGenreClassifier.NONE
) \
.all()
for c in none_classifications:
self._db.delete(c)
# Update presentation
policy = PresentationCalculationPolicy(
classify=True,
regenerate_opds_entries=True,
update_search_index=True
)
work.calculate_presentation(policy=policy)
return Response("", 200)
def _count_complaints_for_work(self, work):
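        """Return a Counter mapping complaint type to the number of
        complaints of that type filed against this work."""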
complaint_types = [complaint.type for complaint in work.complaints]
return Counter(complaint_types)
class FeedController(CirculationManagerController):
def complaints(self):
this_url = self.url_for('complaints')
annotator = AdminAnnotator(self.circulation, flask.request.library)
pagination = load_pagination_from_request()
if isinstance(pagination, ProblemDetail):
return pagination
opds_feed = AdminFeed.complaints(
library=flask.request.library, title="Complaints",
url=this_url, annotator=annotator,
pagination=pagination
)
return feed_response(opds_feed)
def suppressed(self):
this_url = self.url_for('suppressed')
annotator = AdminAnnotator(self.circulation, flask.request.library)
pagination = load_pagination_from_request()
if isinstance(pagination, ProblemDetail):
return pagination
opds_feed = AdminFeed.suppressed(
_db=self._db, title="Hidden Books",
url=this_url, annotator=annotator,
pagination=pagination
)
return feed_response(opds_feed)
def genres(self):
data = dict({
"Fiction": dict({}),
"Nonfiction": dict({})
})
for name in genres:
top = "Fiction" if genres[name].is_fiction else "Nonfiction"
data[top][name] = dict({
"name": name,
"parents": [parent.name for parent in genres[name].parents],
"subgenres": [subgenre.name for subgenre in genres[name].subgenres]
})
return data
class DashboardController(CirculationManagerController):
def stats(self):
patron_count = self._db.query(Patron).count()
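        # Number of distinct patrons with at least one loan that has not yet ended.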
active_loans_patron_count = self._db.query(
distinct(Patron.id)
).join(
Patron.loans
).filter(
Loan.end >= datetime.now(),
).count()
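        # Patrons with an active loan or any hold, combined with a UNION so
        # each patron is counted only once.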
active_patrons = select(
[Patron.id]
).select_from(
join(
Loan,
Patron,
and_(
Patron.id == Loan.patron_id,
Loan.id != None,
Loan.end >= datetime.now()
)
)
).union(
select(
[Patron.id]
).select_from(
join(
Hold,
Patron,
Patron.id == Hold.patron_id
)
)
).alias()
active_loans_or_holds_patron_count_query = select(
[func.count(distinct(active_patrons.c.id))]
).select_from(
active_patrons
)
result = self._db.execute(active_loans_or_holds_patron_count_query)
active_loans_or_holds_patron_count = [r[0] for r in result][0]
loan_count = self._db.query(
Loan
).filter(
Loan.end >= datetime.now()
).count()
hold_count = self._db.query(Hold).count()
data_sources = dict(
overdrive=DataSource.OVERDRIVE,
bibliotheca=DataSource.BIBLIOTHECA,
axis360=DataSource.AXIS_360,
)
vendor_counts = dict()
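        # Count owned titles per commercial vendor; vendors with no owned
        # licenses are left out of the report.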
for key, data_source in data_sources.iteritems():
data_source_count = self._db.query(
LicensePool
).join(
DataSource
).filter(
LicensePool.licenses_owned > 0
).filter(
DataSource.name == data_source
).count()
if data_source_count > 0:
vendor_counts[key] = data_source_count
open_access_count = self._db.query(
LicensePool
).filter(
LicensePool.open_access == True
).count()
if open_access_count > 0:
vendor_counts['open_access'] = open_access_count
title_count = self._db.query(LicensePool).count()
# The sum queries return None instead of 0 if there are
# no license pools in the db.
license_count = self._db.query(
func.sum(LicensePool.licenses_owned)
).filter(
LicensePool.open_access == False,
).all()[0][0] or 0
available_license_count = self._db.query(
func.sum(LicensePool.licenses_available)
).filter(
LicensePool.open_access == False,
).all()[0][0] or 0
return dict(
patrons=dict(
total=patron_count,
with_active_loans=active_loans_patron_count,
with_active_loans_or_holds=active_loans_or_holds_patron_count,
loans=loan_count,
holds=hold_count,
),
inventory=dict(
titles=title_count,
licenses=license_count,
available_licenses=available_license_count,
),
vendors=vendor_counts,
)
def circulation_events(self):
annotator = AdminAnnotator(self.circulation, flask.request.library)
num = min(int(flask.request.args.get("num", "100")), 500)
results = self._db.query(CirculationEvent) \
.join(LicensePool) \
.join(Work) \
.join(DataSource) \
.join(Identifier) \
.order_by(nullslast(desc(CirculationEvent.start))) \
.limit(num) \
.all()
events = map(lambda result: {
"id": result.id,
"type": result.type,
"patron_id": result.foreign_patron_id,
"time": result.start,
"book": {
"title": result.license_pool.work.title,
"url": annotator.permalink_for(result.license_pool.work, result.license_pool, result.license_pool.identifier)
}
}, results)
return dict({ "circulation_events": events })
def bulk_circulation_events(self):
default = str(datetime.today()).split(" ")[0]
date = flask.request.args.get("date", default)
next_date = datetime.strptime(date, "%Y-%m-%d") + timedelta(days=1)
query = self._db.query(
CirculationEvent, Identifier, Work, Edition
) \
.join(LicensePool, LicensePool.id == CirculationEvent.license_pool_id) \
.join(Identifier, Identifier.id == LicensePool.identifier_id) \
.join(Work, Work.id == LicensePool.work_id) \
.join(Edition, Edition.id == Work.presentation_edition_id) \
.filter(CirculationEvent.start >= date) \
.filter(CirculationEvent.start < next_date) \
.order_by(CirculationEvent.start.asc())
query = query \
.options(lazyload(Identifier.licensed_through)) \
.options(lazyload(Work.license_pools))
results = query.all()
work_ids = map(lambda result: result[2].id, results)
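        # Collapse each work's genres into a single comma-separated string so
        # one row per work can be joined back onto the event rows below.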
subquery = self._db \
.query(WorkGenre.work_id, Genre.name) \
.join(Genre) \
.filter(WorkGenre.work_id.in_(work_ids)) \
.order_by(WorkGenre.affinity.desc()) \
.subquery()
genre_query = self._db \
.query(subquery.c.work_id, func.string_agg(subquery.c.name, ",")) \
.select_from(subquery) \
.group_by(subquery.c.work_id)
genres = dict(genre_query.all())
header = [
"time", "event", "identifier", "identifier_type", "title", "author",
"fiction", "audience", "publisher", "language", "target_age", "genres"
]
def result_to_row(result):
(event, identifier, work, edition) = result
return [
str(event.start) or "",
event.type,
identifier.identifier,
identifier.type,
edition.title,
edition.author,
"fiction" if work.fiction else "nonfiction",
work.audience,
edition.publisher,
edition.language,
work.target_age_string,
genres.get(work.id)
]
return [header] + map(result_to_row, results), date
class SettingsController(CirculationManagerController):
METADATA_SERVICE_URI_TYPE = 'application/opds+json;profile=https://librarysimplified.org/rel/profile/metadata-service'
def libraries(self):
if flask.request.method == 'GET':
libraries = []
for library in self._db.query(Library).order_by(Library.name):
settings = dict()
for setting in Configuration.LIBRARY_SETTINGS:
if setting.get("type") == "list":
value = ConfigurationSetting.for_library(setting.get("key"), library).json_value
else:
value = ConfigurationSetting.for_library(setting.get("key"), library).value
if value:
settings[setting.get("key")] = value
libraries += [dict(
uuid=library.uuid,
name=library.name,
short_name=library.short_name,
settings=settings,
)]
return dict(libraries=libraries, settings=Configuration.LIBRARY_SETTINGS)
library_uuid = flask.request.form.get("uuid")
name = flask.request.form.get("name")
short_name = flask.request.form.get("short_name")
library = None
is_new = False
if not short_name:
return MISSING_LIBRARY_SHORT_NAME
if library_uuid:
# Library UUID is required when editing an existing library
# from the admin interface, and isn't present for new libraries.
library = get_one(
self._db, Library, uuid=library_uuid,
)
if not library:
return LIBRARY_NOT_FOUND.detailed(_("The specified library uuid does not exist."))
if not library or short_name != library.short_name:
# If you're adding a new short_name, either by editing an
# existing library or creating a new library, it must be unique.
library_with_short_name = get_one(self._db, Library, short_name=short_name)
if library_with_short_name:
return LIBRARY_SHORT_NAME_ALREADY_IN_USE
if not library:
library, is_new = create(
self._db, Library, short_name=short_name,
uuid=str(uuid.uuid4()))
if name:
library.name = name
if short_name:
library.short_name = short_name
NO_VALUE = object()
for setting in Configuration.LIBRARY_SETTINGS:
# Start off by assuming the value is not set.
value = NO_VALUE
if setting.get("type") == "list":
if setting.get('options'):
# Restrict to the values in 'options'.
value = []
for option in setting.get("options"):
if setting["key"] + "_" + option["key"] in flask.request.form:
value += [option["key"]]
else:
# Allow any entered values.
value = [item for item in flask.request.form.getlist(setting.get('key')) if item]
value = json.dumps(value)
elif setting.get("type") == "image":
image_file = flask.request.files.get(setting.get("key"))
if not image_file and not setting.get("optional"):
self._db.rollback()
return INCOMPLETE_CONFIGURATION.detailed(_(
"The library is missing a required setting: %s." % setting.get("key")))
if image_file:
allowed_types = [Representation.JPEG_MEDIA_TYPE, Representation.PNG_MEDIA_TYPE, Representation.GIF_MEDIA_TYPE]
type = image_file.headers.get("Content-Type")
if type not in allowed_types:
self._db.rollback()
return INVALID_CONFIGURATION_OPTION.detailed(_(
"Upload for %(setting)s must be in GIF, PNG, or JPG format. (Upload was %(format)s.)",
setting=setting.get("label"),
format=type))
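                    # Downscale anything larger than 135x135 and store the
                    # image inline as a base64-encoded PNG data URI.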
image = Image.open(image_file)
width, height = image.size
if width > 135 or height > 135:
image.thumbnail((135, 135), Image.ANTIALIAS)
buffer = StringIO()
image.save(buffer, format="PNG")
b64 = base64.b64encode(buffer.getvalue())
value = "data:image/png;base64,%s" % b64
else:
value = flask.request.form.get(setting['key'], None)
if value != NO_VALUE:
ConfigurationSetting.for_library(setting['key'], library).value = value
if is_new:
return Response(unicode(_("Success")), 201)
else:
return Response(unicode(_("Success")), 200)
def _get_integration_protocols(self, provider_apis, protocol_name_attr="__module__"):
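        """Describe the configuration protocol (name, label, settings, and so
        on) exposed by each of the given provider API classes."""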
protocols = []
for api in provider_apis:
protocol = dict()
name = getattr(api, protocol_name_attr)
protocol["name"] = name
label = getattr(api, "NAME", name)
protocol["label"] = label
description = getattr(api, "DESCRIPTION", None)
if description != None:
protocol["description"] = description
sitewide = getattr(api, "SITEWIDE", None)
if sitewide != None:
protocol["sitewide"] = sitewide
settings = getattr(api, "SETTINGS", [])
protocol["settings"] = settings
child_settings = getattr(api, "CHILD_SETTINGS", None)
if child_settings != None:
protocol["child_settings"] = child_settings
library_settings = getattr(api, "LIBRARY_SETTINGS", None)
if library_settings != None:
protocol["library_settings"] = library_settings
protocols.append(protocol)
return protocols
def _get_integration_info(self, goal, protocols):
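        """List each ExternalIntegration with the given goal, including its
        settings and, for non-sitewide protocols, its per-library settings."""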
services = []
for service in self._db.query(ExternalIntegration).filter(
ExternalIntegration.goal==goal):
[protocol] = [p for p in protocols if p.get("name") == service.protocol]
libraries = []
if not protocol.get("sitewide"):
for library in service.libraries:
library_info = dict(short_name=library.short_name)
for setting in protocol.get("library_settings", []):
key = setting.get("key")
if setting.get("type") == "list":
value = ConfigurationSetting.for_library_and_externalintegration(
self._db, key, library, service
).json_value
else:
value = ConfigurationSetting.for_library_and_externalintegration(
self._db, key, library, service
).value
if value:
library_info[key] = value
libraries.append(library_info)
settings = dict()
for setting in protocol.get("settings", []):
key = setting.get("key")
if setting.get("type") == "list":
value = ConfigurationSetting.for_externalintegration(
key, service).json_value
else:
value = ConfigurationSetting.for_externalintegration(
key, service).value
settings[key] = value
services.append(
dict(
id=service.id,
name=service.name,
protocol=service.protocol,
settings=settings,
libraries=libraries,
)
)
return services
def _set_integration_settings_and_libraries(self, integration, protocol):
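        """Apply posted form values to an integration's settings and library
        associations. Returns True on success, or a ProblemDetail (after
        rolling back) when a value is missing or invalid."""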
settings = protocol.get("settings")
for setting in settings:
key = setting.get("key")
if setting.get("type") == "list" and not setting.get("options"):
value = [item for item in flask.request.form.getlist(key) if item]
if value:
value = json.dumps(value)
else:
value = flask.request.form.get(key)
if value and setting.get("options"):
# This setting can only take on values that are in its
# list of options.
allowed = [option.get("key") for option in setting.get("options")]
if value not in allowed:
self._db.rollback()
return INVALID_CONFIGURATION_OPTION.detailed(_(
"The configuration value for %(setting)s is invalid.",
setting=setting.get("label"),
))
if not value and not setting.get("optional"):
# Roll back any changes to the integration that have already been made.
self._db.rollback()
return INCOMPLETE_CONFIGURATION.detailed(
_("The configuration is missing a required setting: %(setting)s",
setting=setting.get("label")))
integration.setting(key).value = value
if not protocol.get("sitewide"):
integration.libraries = []
libraries = []
if flask.request.form.get("libraries"):
libraries = json.loads(flask.request.form.get("libraries"))
for library_info in libraries:
library = get_one(self._db, Library, short_name=library_info.get("short_name"))
if not library:
self._db.rollback()
return NO_SUCH_LIBRARY.detailed(_("You attempted to add the integration to %(library_short_name)s, but it does not exist.", library_short_name=library_info.get("short_name")))
integration.libraries += [library]
for setting in protocol.get("library_settings", []):
key = setting.get("key")
value = library_info.get(key)
if setting.get("options") and value not in [option.get("key") for option in setting.get("options")]:
self._db.rollback()
return INVALID_CONFIGURATION_OPTION.detailed(_(
"The configuration value for %(setting)s is invalid.",
setting=setting.get("label"),
))
if not value and not setting.get("optional"):
self._db.rollback()
return INCOMPLETE_CONFIGURATION.detailed(
_("The configuration is missing a required setting: %(setting)s for library %(library)s",
setting=setting.get("label"),
library=library.short_name,
))
ConfigurationSetting.for_library_and_externalintegration(self._db, key, library, integration).value = value
return True
def collections(self):
provider_apis = [OPDSImporter,
OPDSForDistributorsAPI,
OverdriveAPI,
BibliothecaAPI,
Axis360API,
OneClickAPI,
EnkiAPI,
]
protocols = self._get_integration_protocols(provider_apis, protocol_name_attr="NAME")
if flask.request.method == 'GET':
collections = []
for c in self._db.query(Collection).order_by(Collection.name).all():
collection = dict(
id=c.id,
name=c.name,
protocol=c.protocol,
parent_id=c.parent_id,
libraries=[{ "short_name": library.short_name } for library in c.libraries],
settings=dict(external_account_id=c.external_account_id),
)
if c.protocol in [p.get("name") for p in protocols]:
[protocol] = [p for p in protocols if p.get("name") == c.protocol]
for setting in protocol.get("settings"):
key = setting.get("key")
if key not in collection["settings"]:
if setting.get("type") == "list":
value = c.external_integration.setting(key).json_value
else:
value = c.external_integration.setting(key).value
collection["settings"][key] = value
collections.append(collection)
return dict(
collections=collections,
protocols=protocols,
)
id = flask.request.form.get("id")
name = flask.request.form.get("name")
if not name:
return MISSING_COLLECTION_NAME
protocol = flask.request.form.get("protocol")
if protocol and protocol not in [p.get("name") for p in protocols]:
return UNKNOWN_PROTOCOL
is_new = False
collection = None
if id:
collection = get_one(self._db, Collection, id=id)
if not collection:
return MISSING_COLLECTION
if collection:
if protocol != collection.protocol:
return CANNOT_CHANGE_PROTOCOL
if name != collection.name:
collection_with_name = get_one(self._db, Collection, name=name)
if collection_with_name:
return COLLECTION_NAME_ALREADY_IN_USE
else:
if protocol:
collection, is_new = get_one_or_create(self._db, Collection, name=name)
if not is_new:
self._db.rollback()
return COLLECTION_NAME_ALREADY_IN_USE
collection.create_external_integration(protocol)
else:
return NO_PROTOCOL_FOR_NEW_SERVICE
[protocol] = [p for p in protocols if p.get("name") == protocol]
parent_id = flask.request.form.get("parent_id")
if parent_id and not protocol.get("child_settings"):
self._db.rollback()
return PROTOCOL_DOES_NOT_SUPPORT_PARENTS
if parent_id:
parent = get_one(self._db, Collection, id=parent_id)
if not parent:
self._db.rollback()
return MISSING_PARENT
collection.parent = parent
settings = protocol.get("child_settings")
else:
collection.parent = None
settings = protocol.get("settings")
for setting in settings:
key = setting.get("key")
value = flask.request.form.get(key)
if not value and not setting.get("optional"):
# Roll back any changes to the collection that have already been made.
self._db.rollback()
return INCOMPLETE_CONFIGURATION.detailed(
_("The collection configuration is missing a required setting: %(setting)s",
setting=setting.get("label")))
if key == "external_account_id":
collection.external_account_id = value
else:
collection.external_integration.setting(key).value = value
libraries = []
if flask.request.form.get("libraries"):
libraries = json.loads(flask.request.form.get("libraries"))
for library_info in libraries:
library = get_one(self._db, Library, short_name=library_info.get("short_name"))
if not library:
return NO_SUCH_LIBRARY.detailed(_("You attempted to add the collection to %(library_short_name)s, but it does not exist.", library_short_name=library_info.get("short_name")))
if collection not in library.collections:
library.collections.append(collection)
for library in collection.libraries:
if library.short_name not in [l.get("short_name") for l in libraries]:
library.collections.remove(collection)
if is_new:
return Response(unicode(_("Success")), 201)
else:
return Response(unicode(_("Success")), 200)
def admin_auth_services(self):
provider_apis = [GoogleOAuthAdminAuthenticationProvider]
protocols = self._get_integration_protocols(provider_apis, protocol_name_attr="NAME")
if flask.request.method == 'GET':
auth_services = self._get_integration_info(ExternalIntegration.ADMIN_AUTH_GOAL, protocols)
return dict(
admin_auth_services=auth_services,
protocols=protocols,
)
protocol = flask.request.form.get("protocol")
if protocol and protocol not in ExternalIntegration.ADMIN_AUTH_PROTOCOLS:
return UNKNOWN_PROTOCOL
id = flask.request.form.get("id")
is_new = False
auth_service = ExternalIntegration.admin_authentication(self._db)
if auth_service:
if id and int(id) != auth_service.id:
return MISSING_SERVICE
if protocol != auth_service.protocol:
return CANNOT_CHANGE_PROTOCOL
else:
if id:
return MISSING_SERVICE
if protocol:
auth_service, is_new = get_one_or_create(
self._db, ExternalIntegration, protocol=protocol,
goal=ExternalIntegration.ADMIN_AUTH_GOAL
)
else:
return NO_PROTOCOL_FOR_NEW_SERVICE
name = flask.request.form.get("name")
auth_service.name = name
[protocol] = [p for p in protocols if p.get("name") == protocol]
result = self._set_integration_settings_and_libraries(auth_service, protocol)
if isinstance(result, ProblemDetail):
return result
if is_new:
return Response(unicode(_("Success")), 201)
else:
return Response(unicode(_("Success")), 200)
def individual_admins(self):
if flask.request.method == 'GET':
admins = []
admins_with_password = Admin.with_password(self._db)
if admins_with_password.count() != 0:
admins=[dict(email=admin.email) for admin in admins_with_password]
return dict(
individualAdmins=admins,
)
email = flask.request.form.get("email")
password = flask.request.form.get("password")
if not email or not password:
return INCOMPLETE_CONFIGURATION
admin, is_new = get_one_or_create(self._db, Admin, email=email)
admin.password = password
try:
self._db.flush()
except ProgrammingError as e:
self._db.rollback()
return MISSING_PGCRYPTO_EXTENSION
if is_new:
return Response(unicode(_("Success")), 201)
else:
return Response(unicode(_("Success")), 200)
def patron_auth_services(self):
provider_apis = [SimpleAuthenticationProvider,
MilleniumPatronAPI,
SIP2AuthenticationProvider,
FirstBookAuthenticationAPI,
CleverAuthenticationAPI,
]
protocols = self._get_integration_protocols(provider_apis)
basic_auth_protocols = [SimpleAuthenticationProvider.__module__,
MilleniumPatronAPI.__module__,
SIP2AuthenticationProvider.__module__,
FirstBookAuthenticationAPI.__module__,
]
if flask.request.method == 'GET':
services = self._get_integration_info(ExternalIntegration.PATRON_AUTH_GOAL, protocols)
return dict(
patron_auth_services=services,
protocols=protocols,
)
id = flask.request.form.get("id")
protocol = flask.request.form.get("protocol")
if protocol and protocol not in [p.get("name") for p in protocols]:
return UNKNOWN_PROTOCOL
is_new = False
if id:
auth_service = get_one(self._db, ExternalIntegration, id=id, goal=ExternalIntegration.PATRON_AUTH_GOAL)
if not auth_service:
return MISSING_SERVICE
if protocol != auth_service.protocol:
return CANNOT_CHANGE_PROTOCOL
else:
if protocol:
auth_service, is_new = create(
self._db, ExternalIntegration, protocol=protocol,
goal=ExternalIntegration.PATRON_AUTH_GOAL
)
else:
return NO_PROTOCOL_FOR_NEW_SERVICE
name = flask.request.form.get("name")
if name:
if auth_service.name != name:
service_with_name = get_one(self._db, ExternalIntegration, name=name)
if service_with_name:
self._db.rollback()
return INTEGRATION_NAME_ALREADY_IN_USE
auth_service.name = name
[protocol] = [p for p in protocols if p.get("name") == protocol]
result = self._set_integration_settings_and_libraries(auth_service, protocol)
if isinstance(result, ProblemDetail):
return result
for library in auth_service.libraries:
# Check that the library didn't end up with multiple basic auth services.
basic_auth_count = 0
for integration in library.integrations:
if integration.goal == ExternalIntegration.PATRON_AUTH_GOAL and integration.protocol in basic_auth_protocols:
basic_auth_count += 1
if basic_auth_count > 1:
self._db.rollback()
return MULTIPLE_BASIC_AUTH_SERVICES.detailed(_(
"You tried to add a patron authentication service that uses basic auth to %(library)s, but it already has one.",
library=library.short_name,
))
            # Check that the library's external type regular expression is valid, if it was set.
value = ConfigurationSetting.for_library_and_externalintegration(
self._db, AuthenticationProvider.EXTERNAL_TYPE_REGULAR_EXPRESSION,
library, auth_service).value
if value:
try:
re.compile(value)
except Exception, e:
self._db.rollback()
return INVALID_EXTERNAL_TYPE_REGULAR_EXPRESSION
if is_new:
return Response(unicode(_("Success")), 201)
else:
return Response(unicode(_("Success")), 200)
def sitewide_settings(self):
if flask.request.method == 'GET':
settings = []
for s in Configuration.SITEWIDE_SETTINGS:
setting = ConfigurationSetting.sitewide(self._db, s.get("key"))
if setting.value:
settings += [{ "key": setting.key, "value": setting.value }]
return dict(
settings=settings,
all_settings=Configuration.SITEWIDE_SETTINGS,
)
key = flask.request.form.get("key")
if not key:
return MISSING_SITEWIDE_SETTING_KEY
value = flask.request.form.get("value")
if not value:
return MISSING_SITEWIDE_SETTING_VALUE
setting = ConfigurationSetting.sitewide(self._db, key)
setting.value = value
return Response(unicode(_("Success")), 200)
def metadata_services(
self, do_get=HTTP.debuggable_get, do_post=HTTP.debuggable_post,
key=None
):
provider_apis = [NYTBestSellerAPI,
NoveListAPI,
MetadataWranglerOPDSLookup,
]
protocols = self._get_integration_protocols(provider_apis, protocol_name_attr="PROTOCOL")
if flask.request.method == 'GET':
metadata_services = self._get_integration_info(ExternalIntegration.METADATA_GOAL, protocols)
return dict(
metadata_services=metadata_services,
protocols=protocols,
)
id = flask.request.form.get("id")
protocol = flask.request.form.get("protocol")
if protocol and protocol not in [p.get("name") for p in protocols]:
return UNKNOWN_PROTOCOL
is_new = False
if id:
service = get_one(self._db, ExternalIntegration, id=id, goal=ExternalIntegration.METADATA_GOAL)
if not service:
return MISSING_SERVICE
if protocol != service.protocol:
return CANNOT_CHANGE_PROTOCOL
else:
if protocol:
service, is_new = create(
self._db, ExternalIntegration, protocol=protocol,
goal=ExternalIntegration.METADATA_GOAL
)
else:
return NO_PROTOCOL_FOR_NEW_SERVICE
name = flask.request.form.get("name")
if name:
if service.name != name:
service_with_name = get_one(self._db, ExternalIntegration, name=name)
if service_with_name:
self._db.rollback()
return INTEGRATION_NAME_ALREADY_IN_USE
service.name = name
[protocol] = [p for p in protocols if p.get("name") == protocol]
result = self._set_integration_settings_and_libraries(service, protocol)
if isinstance(result, ProblemDetail):
return result
# Register this site with the Metadata Wrangler.
if ((is_new or not service.password) and
service.protocol == ExternalIntegration.METADATA_WRANGLER):
problem_detail = self.sitewide_registration(
service, do_get=do_get, do_post=do_post, key=key
)
if problem_detail:
self._db.rollback()
return problem_detail
if is_new:
return Response(unicode(_("Success")), 201)
else:
return Response(unicode(_("Success")), 200)
def sitewide_registration(self, integration, do_get=HTTP.debuggable_get,
do_post=HTTP.debuggable_post, key=None
):
"""Performs a sitewide registration for a particular service, currently
only the Metadata Wrangler.
:return: A ProblemDetail or, if successful, None
"""
if not integration:
return MISSING_SERVICE
# Get the catalog for this service.
try:
response = do_get(integration.url)
except Exception as e:
return REMOTE_INTEGRATION_FAILED.detailed(e.message)
if isinstance(response, ProblemDetail):
return response
content_type = response.headers.get('Content-Type')
if content_type != 'application/opds+json':
return REMOTE_INTEGRATION_FAILED.detailed(
_('The service did not provide a valid catalog.')
)
catalog = response.json()
links = catalog.get('links', [])
# Get the link for registration from the catalog.
register_link_filter = lambda l: (
l.get('rel')=='register' and
l.get('type')==self.METADATA_SERVICE_URI_TYPE
)
register_urls = filter(register_link_filter, links)
if not register_urls:
return REMOTE_INTEGRATION_FAILED.detailed(
_('The service did not provide a register link.')
)
# Get the full registration url.
register_url = register_urls[0].get('href')
if not register_url.startswith('http'):
# We have a relative path. Create a full registration url.
base_url = catalog.get('id')
register_url = urlparse.urljoin(base_url, register_url)
# Generate a public key for this website.
if not key:
key = RSA.generate(2048)
encryptor = PKCS1_OAEP.new(key)
public_key = key.publickey().exportKey()
# Save the public key to the database before generating the public key document.
public_key_setting = ConfigurationSetting.sitewide(self._db, Configuration.PUBLIC_KEY)
public_key_setting.value = public_key
self._db.commit()
# If the integration has an existing shared_secret, use it to access the
# server and update it.
headers = { 'Content-Type' : 'application/x-www-form-urlencoded' }
if integration.password:
token = base64.b64encode(integration.password.encode('utf-8'))
headers['Authorization'] = 'Bearer ' + token
# Get the public key document URL and register this server.
try:
public_key_url = self.url_for('public_key_document')
response = do_post(
register_url, dict(url=public_key_url),
allowed_response_codes=['2xx'], headers=headers
)
except Exception as e:
public_key_setting.value = None
return REMOTE_INTEGRATION_FAILED.detailed(e.message)
registration_info = response.json()
shared_secret = registration_info.get('metadata', {}).get('shared_secret')
if not shared_secret:
public_key_setting.value = None
return REMOTE_INTEGRATION_FAILED.detailed(
_('The service did not provide registration information.')
)
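        # Registration succeeded; the temporary public key is no longer
        # needed, so clear it before storing the decrypted shared secret.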
public_key_setting.value = None
shared_secret = encryptor.decrypt(base64.b64decode(shared_secret))
integration.password = unicode(shared_secret)
def analytics_services(self):
provider_apis = [GoogleAnalyticsProvider,
LocalAnalyticsProvider,
]
protocols = self._get_integration_protocols(provider_apis)
if flask.request.method == 'GET':
services = self._get_integration_info(ExternalIntegration.ANALYTICS_GOAL, protocols)
return dict(
analytics_services=services,
protocols=protocols,
)
id = flask.request.form.get("id")
protocol = flask.request.form.get("protocol")
if protocol and protocol not in [p.get("name") for p in protocols]:
return UNKNOWN_PROTOCOL
is_new = False
if id:
service = get_one(self._db, ExternalIntegration, id=id, goal=ExternalIntegration.ANALYTICS_GOAL)
if not service:
return MISSING_SERVICE
if protocol != service.protocol:
return CANNOT_CHANGE_PROTOCOL
else:
if protocol:
service, is_new = create(
self._db, ExternalIntegration, protocol=protocol,
goal=ExternalIntegration.ANALYTICS_GOAL
)
else:
return NO_PROTOCOL_FOR_NEW_SERVICE
name = flask.request.form.get("name")
if name:
if service.name != name:
service_with_name = get_one(self._db, ExternalIntegration, name=name)
if service_with_name:
self._db.rollback()
return INTEGRATION_NAME_ALREADY_IN_USE
service.name = name
[protocol] = [p for p in protocols if p.get("name") == protocol]
result = self._set_integration_settings_and_libraries(service, protocol)
if isinstance(result, ProblemDetail):
return result
if is_new:
return Response(unicode(_("Success")), 201)
else:
return Response(unicode(_("Success")), 200)
def cdn_services(self):
protocols = [
{
"name": ExternalIntegration.CDN,
"sitewide": True,
"settings": [
{ "key": ExternalIntegration.URL, "label": _("CDN URL") },
{ "key": Configuration.CDN_MIRRORED_DOMAIN_KEY, "label": _("Mirrored domain") },
],
}
]
if flask.request.method == 'GET':
services = self._get_integration_info(ExternalIntegration.CDN_GOAL, protocols)
return dict(
cdn_services=services,
protocols=protocols,
)
id = flask.request.form.get("id")
protocol = flask.request.form.get("protocol")
if protocol and protocol not in [p.get("name") for p in protocols]:
return UNKNOWN_PROTOCOL
is_new = False
if id:
service = get_one(self._db, ExternalIntegration, id=id, goal=ExternalIntegration.CDN_GOAL)
if not service:
return MISSING_SERVICE
if protocol != service.protocol:
return CANNOT_CHANGE_PROTOCOL
else:
if protocol:
service, is_new = create(
self._db, ExternalIntegration, protocol=protocol,
goal=ExternalIntegration.CDN_GOAL
)
else:
return NO_PROTOCOL_FOR_NEW_SERVICE
name = flask.request.form.get("name")
if name:
if service.name != name:
service_with_name = get_one(self._db, ExternalIntegration, name=name)
if service_with_name:
self._db.rollback()
return INTEGRATION_NAME_ALREADY_IN_USE
service.name = name
[protocol] = [p for p in protocols if p.get("name") == protocol]
result = self._set_integration_settings_and_libraries(service, protocol)
if isinstance(result, ProblemDetail):
return result
if is_new:
return Response(unicode(_("Success")), 201)
else:
return Response(unicode(_("Success")), 200)
def search_services(self):
provider_apis = [ExternalSearchIndex,
]
protocols = self._get_integration_protocols(provider_apis, protocol_name_attr="NAME")
if flask.request.method == 'GET':
services = self._get_integration_info(ExternalIntegration.SEARCH_GOAL, protocols)
return dict(
search_services=services,
protocols=protocols,
)
id = flask.request.form.get("id")
protocol = flask.request.form.get("protocol")
if protocol and protocol not in [p.get("name") for p in protocols]:
return UNKNOWN_PROTOCOL
is_new = False
if id:
service = get_one(self._db, ExternalIntegration, id=id, goal=ExternalIntegration.SEARCH_GOAL)
if not service:
return MISSING_SERVICE
if protocol != service.protocol:
return CANNOT_CHANGE_PROTOCOL
else:
if protocol:
service, is_new = get_one_or_create(
self._db, ExternalIntegration, protocol=protocol,
goal=ExternalIntegration.SEARCH_GOAL
)
if not is_new:
self._db.rollback()
return MULTIPLE_SEARCH_SERVICES
else:
return NO_PROTOCOL_FOR_NEW_SERVICE
name = flask.request.form.get("name")
if name:
if service.name != name:
service_with_name = get_one(self._db, ExternalIntegration, name=name)
if service_with_name:
self._db.rollback()
return INTEGRATION_NAME_ALREADY_IN_USE
service.name = name
[protocol] = [p for p in protocols if p.get("name") == protocol]
result = self._set_integration_settings_and_libraries(service, protocol)
if isinstance(result, ProblemDetail):
return result
if is_new:
return Response(unicode(_("Success")), 201)
else:
return Response(unicode(_("Success")), 200)
def discovery_services(self):
protocols = [
{
"name": ExternalIntegration.OPDS_REGISTRATION,
"sitewide": True,
"settings": [
{ "key": ExternalIntegration.URL, "label": _("URL") },
],
}
]
if flask.request.method == 'GET':
registries = self._db.query(ExternalIntegration).filter(ExternalIntegration.goal==ExternalIntegration.DISCOVERY_GOAL)
if registries.count() == 0:
# Set up the default library registry if one doesn't exist yet.
default, ignore = get_one_or_create(
self._db, ExternalIntegration,
goal=ExternalIntegration.DISCOVERY_GOAL,
protocol=ExternalIntegration.OPDS_REGISTRATION,
name="Library Simplified Registry")
default.url = "https://libraryregistry.librarysimplified.org"
services = self._get_integration_info(ExternalIntegration.DISCOVERY_GOAL, protocols)
return dict(
discovery_services=services,
protocols=protocols,
)
id = flask.request.form.get("id")
protocol = flask.request.form.get("protocol")
if protocol and protocol not in [p.get("name") for p in protocols]:
return UNKNOWN_PROTOCOL
is_new = False
if id:
service = get_one(self._db, ExternalIntegration, id=id, goal=ExternalIntegration.DISCOVERY_GOAL)
if not service:
return MISSING_SERVICE
if protocol != service.protocol:
return CANNOT_CHANGE_PROTOCOL
else:
if protocol:
service, is_new = create(
self._db, ExternalIntegration, protocol=protocol,
goal=ExternalIntegration.DISCOVERY_GOAL
)
else:
return NO_PROTOCOL_FOR_NEW_SERVICE
name = flask.request.form.get("name")
if name:
if service.name != name:
service_with_name = get_one(self._db, ExternalIntegration, name=name)
if service_with_name:
self._db.rollback()
return INTEGRATION_NAME_ALREADY_IN_USE
service.name = name
[protocol] = [p for p in protocols if p.get("name") == protocol]
result = self._set_integration_settings_and_libraries(service, protocol)
if isinstance(result, ProblemDetail):
return result
if is_new:
return Response(unicode(_("Success")), 201)
else:
return Response(unicode(_("Success")), 200)
def library_registrations(self, do_get=HTTP.debuggable_get,
do_post=HTTP.debuggable_post, key=None):
LIBRARY_REGISTRATION_STATUS = u"library-registration-status"
SUCCESS = u"success"
FAILURE = u"failure"
if flask.request.method == "GET":
services = []
for service in self._db.query(ExternalIntegration).filter(
ExternalIntegration.goal==ExternalIntegration.DISCOVERY_GOAL):
libraries = []
for library in service.libraries:
library_info = dict(short_name=library.short_name)
status = ConfigurationSetting.for_library_and_externalintegration(
self._db, LIBRARY_REGISTRATION_STATUS, library, service).value
if status:
library_info["status"] = status
libraries.append(library_info)
services.append(
dict(
id=service.id,
libraries=libraries,
)
)
return dict(library_registrations=services)
if flask.request.method == "POST":
integration_id = flask.request.form.get("integration_id")
library_short_name = flask.request.form.get("library_short_name")
integration = get_one(self._db, ExternalIntegration,
goal=ExternalIntegration.DISCOVERY_GOAL,
id=integration_id)
if not integration:
return MISSING_SERVICE
library = get_one(self._db, Library, short_name=library_short_name)
if not library:
return NO_SUCH_LIBRARY
integration.libraries += [library]
status = ConfigurationSetting.for_library_and_externalintegration(
self._db, LIBRARY_REGISTRATION_STATUS, library, integration)
status.value = FAILURE
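            # Record FAILURE up front; the status is only flipped to SUCCESS
            # once the whole registration flow below completes.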
response = do_get(integration.url)
if isinstance(response, ProblemDetail):
return response
type = response.headers.get("Content-Type")
if type == 'application/opds+json':
# This is an OPDS 2 catalog.
catalog = json.loads(response.content)
links = catalog.get("links", [])
vendor_id = catalog.get("metadata", {}).get("adobe_vendor_id")
elif type and type.startswith("application/atom+xml;profile=opds-catalog"):
# This is an OPDS 1 feed.
feed = feedparser.parse(response.content)
links = feed.get("feed", {}).get("links", [])
vendor_id = None
else:
return REMOTE_INTEGRATION_FAILED.detailed(_("The discovery service did not return OPDS."))
register_url = None
for link in links:
if link.get("rel") == "register":
register_url = link.get("href")
break
if not register_url:
return REMOTE_INTEGRATION_FAILED.detailed(_("The discovery service did not provide a register link."))
# Store the vendor id as a ConfigurationSetting on the registry.
if vendor_id:
ConfigurationSetting.for_externalintegration(
AuthdataUtility.VENDOR_ID_KEY, integration).value = vendor_id
# Generate a public key for the library.
if not key:
key = RSA.generate(2048)
public_key = key.publickey().exportKey()
encryptor = PKCS1_OAEP.new(key)
ConfigurationSetting.for_library(Configuration.PUBLIC_KEY, library).value = public_key
# Commit so the public key will be there when the registry gets the
# OPDS Authentication document.
self._db.commit()
auth_document_url = self.url_for(
"authentication_document",
library_short_name=library.short_name
)
response = do_post(
register_url, dict(url=auth_document_url), timeout=60
)
if isinstance(response, ProblemDetail):
return response
catalog = json.loads(response.content)
# Since we generated a public key, the catalog should have the short name
# and shared secret for Short Client Tokens.
short_name = catalog.get("metadata", {}).get("short_name")
shared_secret = catalog.get("metadata", {}).get("shared_secret")
if short_name and shared_secret:
shared_secret = self._decrypt_shared_secret(encryptor, shared_secret)
if isinstance(shared_secret, ProblemDetail):
return shared_secret
ConfigurationSetting.for_library_and_externalintegration(
self._db, ExternalIntegration.USERNAME, library, integration
).value = short_name
ConfigurationSetting.for_library_and_externalintegration(
self._db, ExternalIntegration.PASSWORD, library, integration
).value = shared_secret
integration.libraries += [library]
# We're done with the key, so remove the setting.
ConfigurationSetting.for_library(Configuration.PUBLIC_KEY, library).value = None
status.value = SUCCESS
return Response(unicode(_("Success")), 200)
def _decrypt_shared_secret(self, encryptor, shared_secret):
"""Attempt to decrypt an encrypted shared secret.
:return: The decrypted shared secret, or a ProblemDetail if
it could not be decrypted.
"""
try:
shared_secret = encryptor.decrypt(base64.b64decode(shared_secret))
except ValueError, e:
return SHARED_SECRET_DECRYPTION_ERROR.detailed(
_("Could not decrypt shared secret %s") % shared_secret
)
return shared_secret
|
StarcoderdataPython
|
3276818
|
<reponame>mikepyne/RotaGenerator
import pytest
from unittest.mock import Mock
from spiders.loew import LiturgyOffice
@pytest.fixture()
def lo():
return LiturgyOffice()
class TestMass_init(object):
def test_build_url(self, lo):
assert lo._build_url(2017, 7) == 'https://www.liturgyoffice.org.uk/Calendar/2017/Jul.shtml'
def test_build_url_bad_month(self, lo):
with pytest.raises(ValueError):
            lo._build_url(2017, 13)
def test_build_summary(self, lo):
assert lo._build_summary(2017, 7) == 'Liturgical Calendar for July 2017'
def test_build_summary_bad_month(self, lo):
with pytest.raises(ValueError):
lo._build_summary(2017, 13)
|
StarcoderdataPython
|
105912
|
<reponame>MontyThibault/centre-of-mass-awareness<gh_stars>0
'''OpenGL extension NV.vertex_program1_1
This module customises the behaviour of the
OpenGL.raw.GL.NV.vertex_program1_1 to provide a more
Python-friendly API
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.NV.vertex_program1_1 import *
### END AUTOGENERATED SECTION
|
StarcoderdataPython
|
176165
|
from django.apps import AppConfig
class TemplatesConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'templates'
|
StarcoderdataPython
|
1640225
|
<reponame>melvinkcx/graphql-core-next
from graphql.language import Source
def describe_source():
def can_be_stringified():
source = Source("")
assert str(source) == "<Source name='GraphQL request'>"
source = Source("", "Custom source name")
assert str(source) == "<Source name='Custom source name'>"
|
StarcoderdataPython
|
1602508
|
<filename>examples/benchmark/bench_modem.py
# Copyright: (c) 2021, <NAME>
"""
Benchmark the SDR and measure BER
See README.md for instructions on how to run
Parameters can be edited in main. Remember to match the config file used for the SDR
"""
import numpy as np
import asyncio
import zmq
import zmq.asyncio
zmq.asyncio.install() # pyzmq < 17 needs this
import logging
import time, sys, os
from create_signals import *
OUT_DIR = 'bench_logs/'
nDemodulators = 1 # can be set higher if multiple demodulators shall vote
BASEPORT = 5560
sockets_addr = [f'tcp://*:{BASEPORT + i}' for i in range(nDemodulators)]
class SendSignal():
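    """Streams a noisy copy of the test signal into the modem over ZMQ and
    counts bit errors in the demodulated packets that come back."""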
chunkSize = int(2**14) # just want it to be smaller than the block size in the modem to mimic GNU radio behaviour
modemIn = "tcp://127.0.0.1:5512"
def __init__(self,sig,repeats,rate, bitData,SNR):
"""
signal,
number of loops
sample rate
"""
self.sig = sig.astype(np.complex64)
self.repeats = repeats
self.rate = rate
self.bitData = bitData
self.SNR = SNR
self.delayTime = 1/rate*self.chunkSize # seconds
self.isRunning = True
self.tasks = asyncio.gather(self.sendToModem(),self.receiveFromModem())
async def sendToModem(self):
ctx = zmq.asyncio.Context()
sockets = []
for a in sockets_addr:
sock = ctx.socket(zmq.PUB)
sock.bind(a)
sockets.append(sock)
time.sleep(2) # wait for receive socket to be up
N = len(self.sig)
Nloops = N//self.chunkSize
preLenBlocks = 5
log.debug('sending pre data')
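        # Helper that streams a few blocks of low-power Gaussian noise; it is
        # run before and after the packets to pad the sample stream.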
async def sendBuffer():
for r in range(preLenBlocks):
for s in sockets:
dat = np.sqrt(0.1)*np.random.randn(self.chunkSize).astype(np.complex64)
await s.send(dat)
await asyncio.sleep(self.delayTime)
await sendBuffer()
for r in range(self.repeats):
log.debug(f'repeat {r}')
sigs = []
for i in range(len(sockets)):
sigs.append(awgn(self.sig,self.SNR).astype(np.complex64))
for n in range(Nloops):
idx = slice(n*self.chunkSize,(n+1)*self.chunkSize)
for i,s in enumerate(sockets):
await s.send(sigs[i][idx])
await asyncio.sleep(self.delayTime)
if N > Nloops*self.chunkSize:
for i,s in enumerate(sockets):
await s.send(sigs[i][Nloops*self.chunkSize:])
                await asyncio.sleep(self.delayTime)
log.debug('sending end data')
await sendBuffer()
# log.info('sendToModem finished -- sleep before shutting down')
await asyncio.sleep(2) # wait before shutting down receiver
self.isRunning = False
for s in sockets:
s.close()
log.info('sendToModem done')
async def receiveFromModem(self):
ctx = zmq.asyncio.Context()
socket = ctx.socket(zmq.PULL)
socket.connect(self.modemIn)
poller = zmq.asyncio.Poller()
poller.register(socket,zmq.POLLIN)
timeOut = 3
self.pktCNT = 0
self.bitErrors = []
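        # Pull demodulated packets from the modem until the sender finishes,
        # comparing each one against the known transmitted bits.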
while self.isRunning:
s = await poller.poll(timeOut)
if len(s) > 0 and s[0][1] == zmq.POLLIN:
dataRaw = await socket.recv(zmq.NOBLOCK)
data = np.frombuffer(dataRaw,np.int8)
self.pktCNT += 1
bitErrorsT = len(np.where(data != self.bitData)[0])
self.bitErrors.append(bitErrorsT)
log.info(f'received packet number {self.pktCNT}\tbit errors {bitErrorsT}\tBER {bitErrorsT/len(self.bitData)}')
if self.pktCNT > 0:
BER = np.mean(np.array(self.bitErrors)/len(self.bitData))
else:
BER = -1
socket.close()
log.info(f'receiveFromModem done -- received {self.pktCNT}/{self.repeats} packets\tavg. BER {BER}')
def print_help():
helpstr = """
    bench_modem.py modscheme N SNR_low SNR_high SNR_step
where:
modscheme is the modulation sheme (FSK, GMSK, BPSK, GFSK)
N is the number of simulations to run
SNR_low is the lowest test SNR
SNR_high is the highest test SNR
SNR_step is the step size of the SNRs to loop through
"""
print(helpstr)
sys.exit(-1)
if __name__ == "__main__":
if not len(sys.argv) == 6:
print_help()
modulation = sys.argv[1]
nRuns = int(sys.argv[2])
SNR_low = float(sys.argv[3])
SNR_high = float(sys.argv[4])
SNR_inc = float(sys.argv[5])
    if modulation not in ['GMSK', 'FSK', 'BPSK', 'GFSK']:  # invalid modulation, exit
print_help()
if not os.path.exists(OUT_DIR):
os.mkdir(OUT_DIR)
logName = '{}/{}_{}'.format(OUT_DIR,time.strftime("%Y_%m_%d_%H_%M", time.gmtime(time.time())), f'_bench_{modulation}.log')
saveName = '{}/{}_{}'.format(OUT_DIR,time.strftime("%Y_%m_%d_%H_%M", time.gmtime(time.time())), f'_bench_results_{modulation}')
FORMAT = '%(asctime)-16s %(message)s'
log = logging.getLogger()
logging.Formatter.converter = time.gmtime
logFormatter = logging.Formatter(FORMAT, "%Y-%m-%d %H:%M:%S")
consoleHandler = logging.StreamHandler(sys.stdout)
log.addHandler(consoleHandler)
fileHandler = logging.FileHandler(logName)
consoleHandler.setFormatter(logFormatter)
fileHandler.setFormatter(logFormatter)
log.addHandler(fileHandler)
log.setLevel(logging.DEBUG)
log.info(f'Benchmarking modulation scheme {modulation} over {nRuns} tests')
spSym = 16
baud = 9600
fs = spSym*baud
fsSim = fs * 10//nDemodulators # simulation speedup Can be set as high as the SDR can keep up
bw = baud/0.7
bw_gmsk = baud/0.7
bw_bpsk = baud*1.5 # rrcos with beta = 0.5
fsk_delta_f = baud/2
bw_fsk = 2*baud + 2*fsk_delta_f # is this correct for EBN0 though? Only one frequency contains the power at one time
bw_gfsk2 = 2*baud + 2*fsk_delta_f # is this correct for EBN0 though? Only one frequency contains the power at one time
sig,bitData = get_padded_packet(modulation,spSym,fs)
# SNRs = range(15,16,1) # for quick test
SNRs = np.arange(SNR_low,SNR_high+SNR_inc,SNR_inc)
numPackets = []
bitErrors = []
BER = []
EBN0 = []
loop = asyncio.get_event_loop()
for snr in SNRs:
log.info(f'Running bench with SNR {snr} dB')
if modulation == 'GMSK':
SNR_r = snr + 10*np.log10(bw_gmsk/fs) # for generating AWGN, the bandwidth and oversampling rate need to be taken into account
bw = bw_gmsk
elif modulation == 'FSK':
SNR_r = snr + 10*np.log10(bw_fsk/fs) # for generating AWGN, the bandwidth and oversampling rate need to be taken into account
bw = bw_fsk
elif modulation == 'GFSK':
SNR_r = snr + 10*np.log10(bw_fsk/fs) # for generating AWGN, the bandwidth and oversampling rate need to be taken into account
bw = bw_fsk
elif modulation == 'BPSK':
SNR_r = snr + 10*np.log10(bw_bpsk/fs) # for generating AWGN, the bandwidth and oversampling rate need to be taken into account
bw = bw_bpsk
else:
print_help() # not recognized
sigOut = SendSignal(sig,nRuns, fsSim ,bitData,SNR_r)
loop.run_until_complete(sigOut.tasks)
numPackets.append(sigOut.pktCNT)
bitErrors.append(sigOut.bitErrors.copy())
EBN0.append(snr+10*np.log10(bw/baud))
if len(bitErrors[-1]) > 0:
# mBER = np.median(np.array(bitErrors[-1]))
# sBER = np.std(np.array(bitErrors[-1]))
# BERval = np.array(bitErrors[-1])
# valBER = BERval < np.max((mBER+0.05*sBER, 50))
# log.info(f'mean BER {mBER} std BER {sBER} threshold {mBER+sBER}')
BER.append(np.mean(np.array(bitErrors[-1])/len(bitData)))
# if np.sum(valBER) > 0 :
# BER.append(np.mean(BERval[valBER])/len(bitData))
log.info(f'Corrected BER {BER[-1]}')
else:
BER.append(1)
del sigOut
loop.close()
for S,E,r,B in zip(SNRs,EBN0,numPackets,BER):
log.info(f'SNR {S} dB:\tEB/N0 {E:.2f} dB\tpackets {r}\tavg. BER {B} ')
np.savez(saveName,
SNR = SNRs,
EBN0 = EBN0,
bitErrors = bitErrors,
numPackets = numPackets,
BER = BER,
nRuns = nRuns,
bitData = bitData,
lenBitData = len(bitData),
fs = fs,
baud = baud)
|
StarcoderdataPython
|
1656845
|
<reponame>technolingo/AlgoStructuresPy
'''
Write a program that console prints the numbers
from 1 to n. But for multiples of three print
“fizz” instead of the number and for the multiples
of five print “buzz”. For numbers which are multiples
of both three and five print “fizzbuzz”.
--- Example
fizzBuzz(5);
1
2
fizz
4
buzz
'''
def fizzbuzz(n: int):
if n <= 0:
print('Input must be greater than zero.')
else:
for i in range(1, n + 1):
if i % 3 == 0 and i % 5 == 0:
print('fizzbuzz')
elif i % 3 == 0:
print('fizz')
elif i % 5 == 0:
print('buzz')
else:
print(i)
|
StarcoderdataPython
|
1625521
|
<filename>pyscript/apps/getdata_afldraw/__init__.py
#!/usr/bin/env python
import json
import requests
from requests.exceptions import HTTPError
@service
def getdata_afldraw(
entity_id="sensor.getdata_afl_draw",
unit_of_measurement=None,
friendly_name="AFL Draw",
icon="mdi:football-australian",
):
if entity_id is None:
log.error("getdata_afldraw: No Entity ID provided")
return
URL = "https://aflapi.afl.com.au/afl/v2/matches?competitionId=1&compSeasonId=34&pageSize=50&teamId=13"
TEAM = "<NAME>"
try:
r = task.executor(requests.get, URL)
r.raise_for_status()
except HTTPError as http_err:
log.error("getdata_afldraw: HTTP Error Occured: {http_err}")
except Exception as err:
log.error("getdata_afldraw: Other Error Occured: {err}")
data = r.json()
currentround = data["matches"][0]["compSeason"]["currentRoundNumber"]
ROUNDS = []
for key in data["matches"]:
# print(json.dumps(key, indent=2))
if key["home"]["team"]["name"] == TEAM:
match = key["away"]["team"]["name"] + " (" + key["venue"]["name"] + ")"
else:
match = key["home"]["team"]["name"] + " (" + key["venue"]["name"] + ")"
if key["status"] == "CONCLUDED":
homescore = (
str(key["home"]["score"]["goals"])
+ "."
+ str(key["home"]["score"]["behinds"])
+ " ("
+ str(key["home"]["score"]["totalScore"])
+ ")"
)
awayscore = (
str(key["away"]["score"]["goals"])
+ "."
+ str(key["away"]["score"]["behinds"])
+ " ("
+ str(key["away"]["score"]["totalScore"])
+ ")"
)
if key["home"]["score"]["totalScore"] > key["away"]["score"]["totalScore"]:
result = key["home"]["team"]["nickname"] + ": " + homescore + " vs " + awayscore
else:
result = key["away"]["team"]["nickname"] + ": " + awayscore + " vs " + homescore
else:
homescore = "N/A"
awayscore = "N/A"
result = "Yet to Play"
ROUNDS.append(
{
"round": key["round"]["roundNumber"],
"match": match,
"time": key["utcStartTime"],
"homescore": homescore,
"awayscore": awayscore,
"result": result,
}
)
attributes = {}
attributes["unit_of_measurement"] = unit_of_measurement
attributes["friendly_name"] = friendly_name
attributes["icon"] = icon
attributes["currentround"] = currentround
attributes["draw"] = ROUNDS
state.set(entity_id, value="Round " + str(currentround), new_attributes=attributes)
def get_config(name):
value = pyscript.app_config.get(name)
if value is None:
log.error(
'"'
+ name
+ '" is required parameter but not defined in Pyscript configuration for application'
)
return value
@time_trigger("startup")
def load():
log.info(f"app has started")
|
StarcoderdataPython
|
1746989
|
<filename>openstack-dashboard/openstack_dashboard/dashboards/project/instances/workflows/__init__.py
# Importing non-modules that are not used explicitly
from create_instance import LaunchInstance
|
StarcoderdataPython
|
3307075
|
<reponame>spidezad/python-pptx<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# testdata.py
#
# Copyright (C) 2013 <NAME> <EMAIL>
#
# This module is part of python-pptx and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Test data for unit tests"""
from pptx.oxml import nsdecls, oxml_fromstring
from pptx.shapes import (
_Cell, _Paragraph, _Picture, _Shape, _ShapeCollection
)
class CT_CorePropertiesBuilder(object):
"""
Test data builder for CT_CoreProperties (cp:coreProperties) XML element
"""
properties = (
('author', 'dc:creator'),
('category', 'cp:category'),
('comments', 'dc:description'),
('content_status', 'cp:contentStatus'),
('created', 'dcterms:created'),
('identifier', 'dc:identifier'),
('keywords', 'cp:keywords'),
('language', 'dc:language'),
('last_modified_by', 'cp:lastModifiedBy'),
('last_printed', 'cp:lastPrinted'),
('modified', 'dcterms:modified'),
('revision', 'cp:revision'),
('subject', 'dc:subject'),
('title', 'dc:title'),
('version', 'cp:version'),
)
def __init__(self):
"""Establish instance variables with default values"""
for propname, tag in self.properties:
setattr(self, '_%s' % propname, None)
@property
def _ns_prefixes(self):
ns_prefixes = ['cp']
for propname, tag in self.properties:
value = getattr(self, '_%s' % propname)
if value is None:
continue
ns_prefix = tag.split(':')[0]
if ns_prefix not in ns_prefixes:
ns_prefixes.append(ns_prefix)
if ns_prefix == 'dcterms' and 'xsi' not in ns_prefixes:
ns_prefixes.append('xsi')
return tuple(ns_prefixes)
@property
def props_xml(self):
props_xml = ''
for propname, tag in self.properties:
value = getattr(self, '_%s' % propname)
if value is None:
continue
if value == '':
xml = ' <%s/>\n' % tag
else:
if tag.startswith('dcterms:'):
xml = (' <%s xsi:type="dcterms:W3CDTF">%s</%s>\n' %
(tag, value, tag))
else:
xml = ' <%s>%s</%s>\n' % (tag, value, tag)
props_xml += xml
return props_xml
@property
def coreProperties(self):
if self.props_xml:
coreProperties = (
'<cp:coreProperties %s>\n%s</cp:coreProperties>\n' %
(nsdecls(*self._ns_prefixes), self.props_xml)
)
else:
coreProperties = (
'<cp:coreProperties %s/>\n' % nsdecls('cp', 'dc', 'dcterms')
)
return coreProperties
@property
def element(self):
"""Return element based on XML generated by builder"""
return oxml_fromstring(self.xml)
def with_child(self, name, value):
"""add property element for *name* set to *value*"""
setattr(self, '_%s' % name, value)
return self
def with_date_prop(self, name, value):
"""add date property element for *name* set to *value*"""
setattr(self, '_%s' % name, value)
return self
def with_revision(self, value):
"""add revision element set to *value*"""
self._revision = value
return self
@property
def xml(self):
"""
Return XML string based on settings accumulated via method calls
"""
return self.coreProperties
class CT_PresetGeometry2DBuilder(object):
"""
Test data builder for CT_PresetGeometry2D (prstGeom) XML element
"""
def __init__(self, prst='rect'):
"""Establish instance variables with default values"""
self._prst = prst
self._avLst = False
self._guides = []
@property
def with_avLst(self):
"""contains an <a:avLst> element, even if it's empty"""
self._avLst = True
return self
def with_gd(self, val=25000, name='adj'):
"""add <a:gd> element"""
self._guides.append((name, val))
return self
@property
def avLst(self):
if self.gd:
avLst = ' <a:avLst>\n%s </a:avLst>\n' % self.gd
elif self._avLst:
avLst = ' <a:avLst/>\n'
else:
avLst = ''
return avLst
@property
def gd(self):
if self._guides:
tmpl = ' <a:gd name="%s" fmla="val %d"/>\n'
gd = ''.join([tmpl % guide for guide in self._guides])
else:
gd = ''
return gd
@property
def prstGeom(self):
if self.avLst:
prstGeom = ('<a:prstGeom %s prst="%s">\n%s</a:prstGeom>\n' %
(nsdecls('a'), self._prst, self.avLst))
else:
prstGeom = ('<a:prstGeom %s prst="%s"/>\n' %
(nsdecls('a'), self._prst))
return prstGeom
def reset(self):
"""return guides and avLst to defaults"""
self._avLst = False
self._guides = []
@property
def xml(self):
"""
Return XML string based on settings accumulated via method calls
"""
return self.prstGeom
@property
def element(self):
"""Return element based on XML generated by builder"""
return oxml_fromstring(self.xml)
class CT_TableBuilder(object):
"""Test data builder for CT_Table (tbl) XML element"""
empty_tbl_tmpl = (
'<a:tbl %s/>%s\n'
)
with_props_tmpl = (
'<a:tbl %s>\n'
' <a:tblPr%s/>\n'
'</a:tbl>\n'
)
def __init__(self):
"""Establish instance variables with default values"""
self._tmpl = CT_TableBuilder.empty_tbl_tmpl
self._properties = []
@property
def _tblPr_attrs_str(self):
"""String containing all attributes of tblPr element"""
s = ''
for prop in self._properties:
s += ' %s="%s"' % prop
return s
@property
def xml(self):
"""
Return XML string based on settings accumulated via method calls
"""
return self._tmpl % (nsdecls('a'), self._tblPr_attrs_str)
@property
def element(self):
"""Return element based on XML generated by builder"""
return oxml_fromstring(self.xml)
@property
def with_tblPr(self):
"""include tblPr element even if it's empty"""
self._tmpl = CT_TableBuilder.with_props_tmpl
return self
def with_prop(self, name, value):
"""add property named *name* with specified *value*"""
self._tmpl = CT_TableBuilder.with_props_tmpl
self._properties.append((name, value))
return self
def a_coreProperties():
"""Syntactic sugar to construct a CT_CorePropertiesBuilder instance"""
return CT_CorePropertiesBuilder()
def a_prstGeom(prst='rect'):
"""Syntactic sugar to construct a CT_PresetGeometry2DBuilder instance"""
return CT_PresetGeometry2DBuilder(prst)
def a_tbl():
"""Syntactic sugar to construct a CT_TableBuilder"""
return CT_TableBuilder()
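# Illustrative usage of the builder functions above (values are assumed, not from the original tests):
#     a_coreProperties().with_child('title', 'Example').with_revision('1').xml
#     a_prstGeom('roundRect').with_gd(30346, 'adj').element
#     a_tbl().with_prop('firstRow', '1').element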
class _TestShapeXml(object):
"""XML snippets of various shapes for use in unit tests"""
@property
def autoshape(self):
"""
XML for an autoshape for unit testing purposes, a rounded rectangle in
this case.
"""
return (
'<p:sp xmlns:p="http://schemas.openxmlformats.org/presentationml/'
'2006/main" xmlns:a="http://schemas.openxmlformats.org/drawingml/'
'2006/main"><p:nvSpPr><p:cNvPr id="3" name="Rounded Rectangle 2"/'
'><p:cNvSpPr/><p:nvPr/></p:nvSpPr><p:spPr><a:xfrm><a:off x="76009'
'6" y="562720"/><a:ext cx="2520824" cy="914400"/></a:xfrm><a:prst'
'Geom prst="roundRect"><a:avLst><a:gd name="adj" fmla="val 30346"'
'/></a:avLst></a:prstGeom></p:spPr><p:style><a:lnRef idx="1"><a:s'
'chemeClr val="accent1"/></a:lnRef><a:fillRef idx="3"><a:schemeCl'
'r val="accent1"/></a:fillRef><a:effectRef idx="2"><a:schemeClr v'
'al="accent1"/></a:effectRef><a:fontRef idx="minor"><a:schemeClr '
'val="lt1"/></a:fontRef></p:style><p:txBody><a:bodyPr rtlCol="0" '
'anchor="ctr"/><a:lstStyle/><a:p><a:pPr algn="ctr"/><a:r><a:rPr l'
'ang="en-US" dirty="0" smtClean="0"/><a:t>This is text inside a r'
'ounded rectangle</a:t></a:r><a:endParaRPr lang="en-US" dirty="0"'
'/></a:p></p:txBody></p:sp>'
)
@property
def empty_spTree(self):
return (
'<p:spTree %s>\n'
' <p:nvGrpSpPr>\n'
' <p:cNvPr id="1" name=""/>\n'
' <p:cNvGrpSpPr/>\n'
' <p:nvPr/>\n'
' </p:nvGrpSpPr>\n'
' <p:grpSpPr/>\n'
'</p:spTree>\n' % nsdecls('p', 'a')
)
@property
def picture(self):
""" XML for an pic shape, for unit testing purposes """
return (
'<p:pic %s>\n'
' <p:nvPicPr>\n'
' <p:cNvPr id="9" name="Picture 8" descr="image.png"/>\n'
' <p:cNvPicPr>\n'
' <a:picLocks noChangeAspect="1"/>\n'
' </p:cNvPicPr>\n'
' <p:nvPr/>\n'
' </p:nvPicPr>\n'
' <p:blipFill>\n'
' <a:blip r:embed="rId7"/>\n'
' <a:stretch>\n'
' <a:fillRect/>\n'
' </a:stretch>\n'
' </p:blipFill>\n'
' <p:spPr>\n'
' <a:xfrm>\n'
' <a:off x="111" y="222"/>\n'
' <a:ext cx="333" cy="444"/>\n'
' </a:xfrm>\n'
' <a:prstGeom prst="rect">\n'
' <a:avLst/>\n'
' </a:prstGeom>\n'
' </p:spPr>\n'
'</p:pic>\n' % nsdecls('a', 'p', 'r')
)
@property
def placeholder(self):
"""Generic placeholder XML, a date placeholder in this case"""
return (
'<p:sp xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/'
'main" xmlns:p="http://schemas.openxmlformats.org/presentationml/'
'2006/main">\n'
' <p:nvSpPr>\n'
' <p:cNvPr id="9" name="Date Placeholder 8"/>\n'
' <p:cNvSpPr>\n'
' <a:spLocks noGrp="1"/>\n'
' </p:cNvSpPr>\n'
' <p:nvPr>\n'
' <p:ph type="dt" sz="half" idx="10"/>\n'
' </p:nvPr>\n'
' </p:nvSpPr>\n'
' <p:spPr/>\n'
'</p:sp>\n'
)
@property
def rounded_rectangle(self):
"""XML for a rounded rectangle auto shape"""
return self.autoshape
@property
def textbox(self):
"""Generic text box XML"""
return (
'<p:sp %s>\n'
' <p:nvSpPr>\n'
' <p:cNvPr id="9" name="TextBox 8"/>\n'
' <p:cNvSpPr txBox="1"/>\n'
' <p:nvPr/>\n'
' </p:nvSpPr>\n'
' <p:spPr>\n'
' <a:xfrm>\n'
' <a:off x="111" y="222"/>\n'
' <a:ext cx="333" cy="444"/>\n'
' </a:xfrm>\n'
' <a:prstGeom prst="rect">\n'
' <a:avLst/>\n'
' </a:prstGeom>\n'
' <a:noFill/>\n'
' </p:spPr>\n'
' <p:txBody>\n'
' <a:bodyPr wrap="none">\n'
' <a:spAutoFit/>\n'
' </a:bodyPr>\n'
' <a:lstStyle/>\n'
' <a:p/>\n'
' </p:txBody>\n'
'</p:sp>' % nsdecls('a', 'p')
)
test_shape_xml = _TestShapeXml()
class _TestTableXml(object):
"""XML snippets of table-related elements for use in unit tests"""
@property
def cell(self):
"""
XML for empty default table cell
"""
return (
'<a:tc %s>\n'
' <a:txBody>\n'
' <a:bodyPr/>\n'
' <a:lstStyle/>\n'
' <a:p/>\n'
' </a:txBody>\n'
'</a:tc>\n' % nsdecls('a')
)
@property
def cell_with_margins(self):
"""
XML for cell having top, left, right, and bottom margin settings
"""
return (
'<a:tc %s>\n'
' <a:txBody>\n'
' <a:bodyPr/>\n'
' <a:lstStyle/>\n'
' <a:p/>\n'
' </a:txBody>\n'
' <a:tcPr marT="12" marR="34" marB="56" marL="78"/>\n'
'</a:tc>\n' % nsdecls('a')
)
@property
def top_aligned_cell(self):
"""
XML for empty top-aligned table cell
"""
return (
'<a:tc %s>\n'
' <a:txBody>\n'
' <a:bodyPr/>\n'
' <a:lstStyle/>\n'
' <a:p/>\n'
' </a:txBody>\n'
' <a:tcPr anchor="t"/>\n'
'</a:tc>\n' % nsdecls('a')
)
test_table_xml = _TestTableXml()
class _TestTextXml(object):
"""XML snippets of text-related elements for use in unit tests"""
@property
def centered_paragraph(self):
"""
XML for centered paragraph
"""
return (
'<a:p %s>\n'
' <a:pPr algn="ctr"/>\n'
'</a:p>\n' % nsdecls('a')
)
@property
def paragraph(self):
"""
XML for a default, empty paragraph
"""
return '<a:p %s/>\n' % nsdecls('a')
test_text_xml = _TestTextXml()
class _TestShapeElements(object):
"""Shape elements for use in unit tests"""
@property
def autoshape(self):
return oxml_fromstring(test_shape_xml.autoshape)
@property
def empty_spTree(self):
return oxml_fromstring(test_shape_xml.empty_spTree)
@property
def picture(self):
return oxml_fromstring(test_shape_xml.picture)
@property
def placeholder(self):
return oxml_fromstring(test_shape_xml.placeholder)
@property
def rounded_rectangle(self):
return oxml_fromstring(test_shape_xml.rounded_rectangle)
@property
def textbox(self):
return oxml_fromstring(test_shape_xml.textbox)
test_shape_elements = _TestShapeElements()
class _TestTableElements(object):
"""Table-related elements for use in unit tests"""
@property
def cell(self):
return oxml_fromstring(test_table_xml.cell)
@property
def cell_with_margins(self):
return oxml_fromstring(test_table_xml.cell_with_margins)
@property
def isolated_tbl(self):
return oxml_fromstring(test_table_xml.isolated_tbl)
@property
def isolated_tbl_with_true_props(self):
return oxml_fromstring(test_table_xml.isolated_tbl_with_true_props)
@property
def top_aligned_cell(self):
return oxml_fromstring(test_table_xml.top_aligned_cell)
test_table_elements = _TestTableElements()
class _TestTextElements(object):
"""Text elements for use in unit tests"""
@property
def centered_paragraph(self):
return oxml_fromstring(test_text_xml.centered_paragraph)
@property
def paragraph(self):
return oxml_fromstring(test_text_xml.paragraph)
test_text_elements = _TestTextElements()
class _TestShapes(object):
"""Shape instances for use in unit tests"""
@property
def autoshape(self):
return _Shape(test_shape_elements.autoshape)
@property
def empty_shape_collection(self):
return _ShapeCollection(test_shape_elements.empty_spTree)
@property
def picture(self):
return _Picture(test_shape_elements.picture)
@property
def placeholder(self):
return _Shape(test_shape_elements.placeholder)
@property
def rounded_rectangle(self):
return _Shape(test_shape_elements.rounded_rectangle)
@property
def textbox(self):
return _Shape(test_shape_elements.textbox)
test_shapes = _TestShapes()
class _TestTableObjects(object):
"""Table-related object instances for use in unit tests"""
@property
def cell(self):
return _Cell(test_table_elements.cell)
test_table_objects = _TestTableObjects()
class _TestTextObjects(object):
"""Text object instances for use in unit tests"""
@property
def paragraph(self):
return _Paragraph(test_text_elements.paragraph)
test_text_objects = _TestTextObjects()
|
StarcoderdataPython
|
3376035
|
<reponame>anhp95/forest_attr_segment<gh_stars>0
# %%
import os
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
dir_ = r"D:\Publication\IGARSS 2021\tree_spec\material"
csv_dir = "loss_acc_performance"
figure_dir = "figure"
attr = "spec"
csv_file = os.path.join(dir_, csv_dir, f"{attr}.csv")
fig_file = os.path.join(dir_, figure_dir, f"{attr}_acc.png")
df = pd.read_csv(csv_file)
epoch = df.epoch.values + 1
fig_2d_p2 = df.acc_2d_p2
fig_2d_p1p2 = df.acc_2d_p1p2
fig_2d_p1p2p3 = df.acc_2d_p1p2p3
fig_3d_org = df.acc_3d_org_p1p2p3
fig_3d_adj = df.acc_3d_adj_p1p2p3
# fig_2d_p2 = df.loss_2d_p2
# fig_2d_p1p2 = df.loss_2d_p1p2
# fig_2d_p1p2p3 = df.loss_2d_p1p2p3
# fig_3d_org = df.loss_3d_org_p1p2p3
# fig_3d_adj = df.loss_3d_adj_p1p2p3
fig, ax1 = plt.subplots()
ax1.plot(epoch, fig_2d_p2, "pink", label="2D UNET - P2")
ax1.plot(epoch, fig_2d_p1p2, "g-", label="2D UNET - P1P2")
ax1.plot(epoch, fig_2d_p1p2p3, "y-", label="2D UNET - P1P2P3")
ax1.plot(epoch, fig_3d_org, "r-", label="3D UNET - P1P2P3")
ax1.plot(epoch, fig_3d_adj, "b-", label="Our model")
ax1.set_xlabel("Number of epochs")
ax1.set_ylabel("Overall Accuracy")
plt.legend()
plt.show()
# fig.savefig(fig_file, format="png", dpi=1200)
# %%
|
StarcoderdataPython
|
3355050
|
<reponame>Cajivah/nlp-information-extraction
import glob
import json
import os
attributes = ['subject', 'flatMeterage', 'roomMeterage', 'rent', 'bills', 'deposit', 'internetSpeed',
'district', 'street', 'roomsCount', 'flatmatesCount', 'flatmatesGenders', 'flatmatesOccupation',
'preferredOccupation', 'preferredGender']
def load(json_dir_name):
json_pattern = os.path.join(json_dir_name, '*.json')
data = []
files = glob.glob(json_pattern)
for file in files:
with open(file, encoding='utf-8') as json_data:
classified_json = json.load(json_data)
data.append(classified_json)
return data
def save_by_category(directory, data_sets, category):
    for i in range(0, len(data_sets)):
        content = data_sets[i]["content"]
        # Look up the value without overwriting the lookup key used in later iterations
        category_value = data_sets[i]["meta"][category]
        if category_value is None:
            category_value = 'any'
        with open(f'{directory}/{category_value}/{str(i)}.txt', mode='x', encoding='utf-8') as f:
            f.write(content)
    return None
def load_stopwords():
with open('data/nltk/stopwords.json', encoding='utf-8') as f:
return json.load(f)
|
StarcoderdataPython
|
1659865
|
# # @package elementpropertiesvalidation
# This module exists to pair with element.py and validate anything that is
# being attempted to set as a value for one of an Element's properties
from . import content_options as content
# used for tuplifying
import string
import ast
# # A listing of the available options for the 'Content' field of an Element
__CONTENT_OPTIONS = content.GetContentOptions()
# # Returns the listing of options for the 'Content' field of an Element
def GetContentOptions():
return __CONTENT_OPTIONS
# # Confirms that a position is in the form (int, int) for (x,y)
def validatePos(name, raw):
if isinstance(raw, str):
val = tuplify(raw)
else:
val = raw
if not isinstance(val, tuple):
raise TypeError(f'Parameter {name} must be a 2-tuple of ints')
if len(val) != 2:
raise ValueError(f'Parameter {name} must be 2-tuple of ints')
if not isinstance(val[0], int):
raise TypeError(f'Parameter {name}: only integers allowed for x-coords')
if not isinstance(val[1], int):
raise TypeError(f'Parameter {name}: only integers allowed for y-coords')
return val
# # Confirms that dimensions are in the form (int, int) for (width, height)
def validateDim(name, raw):
if isinstance(raw, str):
val = tuplify(raw)
else:
val = raw
if not isinstance(val, tuple):
raise TypeError('Parameter ' + name + ' must be a 2-tuple of ints')
if len(val) != 2:
raise ValueError('Parameter ' + name + ' must be 2-tuple of ints')
if not(val[0] > 0 and val[1] > 0):
raise ValueError('Parameter ' + name + ' must only have positive values')
if not isinstance(val[0], int):
raise TypeError('Parameter ' + name + ': only integers allowed for width')
if not isinstance(val[1], int):
raise TypeError('Parameter ' + name + ': only integers allowed for height')
return val
# # Confirms that a color is in the form (int, int, int) for (R,G,B)
def validateColor(name, raw):
if isinstance(raw, str):
val = tuplify(raw)
else:
val = raw
if not isinstance(val, tuple):
raise TypeError('Parameter ' + name + ' must be a 3-tuple of ints')
if len(val) != 3:
raise ValueError('Parameter ' + name + ' must be 3-tuple of ints')
if not isinstance(val[0], int):
raise TypeError('Parameter ' + name + ': only integers allowed for red')
if not isinstance(val[1], int):
raise TypeError('Parameter ' + name + ': only integers allowed for green')
if not isinstance(val[2], int):
raise TypeError('Parameter ' + name + ': only integers allowed for blue')
if not 0 <= val[0] <= 255:
raise ValueError('Parameter ' + name + ': red must be between 0 & 255')
if not 0 <= val[1] <= 255:
raise ValueError('Parameter ' + name + ': green must be between 0 & 255')
if not 0 <= val[2] <= 255:
raise ValueError('Parameter ' + name + ': blue must be between 0 & 255')
return val
# # Confirms that an LocationString is a str
def validateLocation(name, val):
val = str(val)
if not isinstance(val, str):
raise TypeError('Parameter ' + name + ' must be an str')
return val
# # Confirms that the Content specification is a str corresponding to the
# available options
def validateContent(name, val):
if not isinstance(val, str):
raise TypeError('Parameter ' + name + ' must be a str')
if val not in __CONTENT_OPTIONS:
raise ValueError('Parameter ' + name + ' must be one of the options: '
+str(__CONTENT_OPTIONS))
return val
# ## Confirms that the clock offset is valid
def validateClockOffset(name, raw):
if isinstance(raw, str):
val = tuplify(raw)
else:
val = raw
if not isinstance(val, tuple):
raise TypeError('Parameter ' + name + ' must be a tuple of (clock, cycles)')
return val
# ## Confirms that scale factor is an int
def validateTimeScale(name, raw):
try:
val = float(raw)
except:
raise TypeError('Parameter ' + name + ' must be a number')
return val
# Confirms that an offset is an int
def validateOffset(name, raw):
if isinstance(raw, str) and isNumeral(raw):
        val = int(raw)  # string.atoi no longer exists in Python 3
else:
val = raw
if not isinstance(val, int):
raise TypeError('Parameter ' + name + ' must be a int')
return val
# # Confirms this is a string
# Treats None objects as empty string
def validateString(name, val):
if val is None:
val = ''
else:
val = str(val)
if not isinstance(val, str):
raise TypeError('Parameter ' + name + ' must be a str')
return val
# # Confirms this is a bool and converts if necessary
def validateBool(name, val):
if val is None:
val = False
else:
val = not not val
return val
# # Confirms this is list
def validateList(name, val):
if isinstance(val, str):
val = ast.literal_eval(val)
else:
val = list(val)
if not isinstance(val, list):
raise TypeError('Parameter ' + name + ' must be a list')
return val
# Takes a string (of supposed user input) and converts it, if possible, to a
# tuple of ints. Floats are currently discarded and disregarded
def tuplify(raw):
# strip away any leading characters that are not numeric digits
strip = string.whitespace + string.ascii_letters + string.punctuation
strip = strip.replace('-', '')
strip = strip.replace(',', '')
temp = raw.strip(strip)
temp = temp.split(',')
for i in range(len(temp)):
temp[i] = temp[i].split(' ')
nums = []
for element in temp:
for s in element:
if isNumeral(s):
nums.append(int(s))
val = tuple(nums)
return val
# # A simple helper method for tuplify(), providing a level of abstraction for
# checking that every character within a string is a digit (base 10)
def isNumeral(s):
options = string.digits + '-'
res = True
for char in s:
if char not in options:
res = False
if len(s) == 0:
res = False
return res
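# Illustrative behaviour of the helpers above (assumed inputs, not part of the original module):
#     tuplify('(3, 4)') -> (3, 4)
#     tuplify('255 128 0') -> (255, 128, 0)
#     validateColor('fill', '255,128,0') -> (255, 128, 0)
#     validatePos('origin', (10, 20)) -> (10, 20)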
|
StarcoderdataPython
|
192794
|
#!/usr/bin/env python
####################
# Required Modules #
####################
# Generic/Built-in
import logging
# Libs
# Custom
##################
# Configurations #
##################
###################################
# Response Parser Class - Results #
###################################
class Results:
""" Parser class for converting responses into a resuable object """
def __init__(self, response: dict):
for attribute, value in response.items():
setattr(self, attribute, value)
|
StarcoderdataPython
|
199401
|
import unittest
import subprocess
import capitals
class TestCapitals(unittest.TestCase):
def test_correct_capital_returned(self):
city = capitals.capital("Central African Republic")
self.assertEqual("Bangui", city)
city = capitals.capital("Hungary")
self.assertEqual("Budapest", city)
def test_correct_capital_returned_when_country_is_uppercase(self):
city = capitals.capital("HUNGARY")
self.assertEqual("Budapest", city)
def test_correct_capital_returned_when_country_is_lowercase(self):
city = capitals.capital("hungary")
self.assertEqual("Budapest", city)
def test_correct_capital_returned_when_country_name_contains_multiple_words(self):
city = capitals.capital("Saint Vincent And The Grenadines")
self.assertEqual("Kingstown", city)
def test_correct_output_returned_when_country_is_invalid(self):
city = capitals.capital("Atlantis")
self.assertEqual(0, city)
def test_correct_help_message_displayed(self):
help_message = b"""usage: capitals [country]\n\nDisplays capital city of specified country.\n\npositional arguments:\n country Displays the capital of country.\n\noptional arguments:\n -h, --help show this help message and exit\n"""
command_output = subprocess.check_output(["python3", "capitals.py", "-h"])
self.assertEqual(help_message, command_output)
def test_correct_return_value_returned_when_running_interactively(self):
return_value = subprocess.check_output(["python3", "capitals.py", "Hungary"])
return_value = return_value.strip()
self.assertEqual(b"The Capital city of Hungary is Budapest.", return_value)
def test_correct_return_value_returned_when_not_running_interactively(self):
city = capitals.capital("Hungary")
self.assertEqual("Budapest", city)
def test_correct_return_value_returned_when_country_is_incorrect(self):
return_value = subprocess.check_output(["python3", "capitals.py", "Atlantis"])
return_value = return_value.strip()
self.assertEqual(b"Please enter a valid country name.", return_value)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
1790371
|
<gh_stars>1-10
from Owner import Owner
from Tenant import Tenant
from Apartment import Apartment
def create_owner(name, address, phone):
return Owner(name, address, phone)
def create_apartment(apartment_type, size, no_rooms, bau_year, address, apartment_owner):
return Apartment(apartment_type, size, no_rooms, None, None, bau_year, address, apartment_owner, None)
def create_tenant(name, address, phone):
return Tenant(name, address, phone)
def rent_apartment(apartment, tenant, duration, rent):
apartment.set_tenant(tenant=tenant)
apartment.set_duration(duration=duration)
apartment.set_rent(rent=rent)
def change_owner(owner, apartment):
apartment.set_owner(owner=owner)
def search_apartment(apartment) :
print("apartment information")
print("#####################")
print("apartemnt owner:", apartment.get_owner().get_name())
print("#####################")
if __name__ == '__main__':
owner = create_owner("Asif", "Munich", "0123")
apartment = create_apartment("private", 30, 3, 1990, "Munich", owner)
tenant = create_tenant("Saiful", "Munich", "01234")
rent_apartment(apartment, tenant, 2, 500)
search_apartment(apartment)
|
StarcoderdataPython
|
115037
|
<filename>backend/apps/utils/api_views.py
from django.shortcuts import render_to_response
# Import the tastypie.api.Api object with which your api resources are registered.
from backend.urls import v1_api as api
def api_profile(request, resource):
""" Allows easy profiling of API requests with django-debug-toolbar. """
context = {}
resource = resource.strip('/')
resource = api.canonical_resource_for(resource)
response = resource.get_list(request)
context['api_response'] = response
response = render_to_response('utils/api_profile.html', context)
    print(response['content-type'])
return response
|
StarcoderdataPython
|
10199
|
<gh_stars>0
import datetime
from app.models import Log
from flask_login import current_user
from app.extensions import db
# https://stackoverflow.com/questions/6558535/find-the-date-for-the-first-monday-after-a-given-date
def next_weekday(
d: datetime.datetime = datetime.datetime.utcnow(),
weekday: int = 0,
) -> datetime.datetime:
days_ahead = weekday - d.weekday()
if days_ahead <= 0: # Target day already happened this week
days_ahead += 7
# Flatten the current time to just the date
date = datetime.datetime(d.year, d.month, d.day)
return date + datetime.timedelta(days_ahead)
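# Illustrative example (assumed date, not part of the original module):
#     next_weekday(datetime.datetime(2021, 6, 3, 15, 30), weekday=0)
# returns datetime.datetime(2021, 6, 7, 0, 0) -- the Monday after Thursday 2021-06-03,
# with the time of day flattened to midnight.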
def add_moderator_log(log_text: str) -> None:
db.session.add(Log(
moderator_id=current_user.id,
message=log_text,
))
db.session.commit()
|
StarcoderdataPython
|
106696
|
<reponame>fhvilshoj/ECINN
# convenience file to load ibinn model with or without pretrained weights.
import os
# This is the local model file, to override GenerativeClassifier from IB-INN module
# in order to add Celeba and FakeMNIST datasets.
from model import GenerativeClassifier
def load_model(args, cfg):
N_epochs = eval(cfg['training']['n_epochs'])
beta = eval(cfg['training']['beta_IB'])
train_nll = bool(not eval(cfg['ablations']['no_NLL_term']))
label_smoothing = eval(cfg['data']['label_smoothing'])
train_vib = eval(cfg['ablations']['vib'])
resume = cfg['checkpoints']['resume_checkpoint']
inn = GenerativeClassifier(cfg)
inn.cuda()
if resume:
print(">> Loading model weights", resume)
inn.load(resume)
else:
fname = os.path.join(args.output_dir, cfg.get('checkpoints', 'base_name'), 'model.pt')
if os.path.exists(fname):
print(">> Loading model weights", fname)
inn.load(fname)
return inn
|
StarcoderdataPython
|
3345246
|
from phenotype.Core import (
__op_attr_getter__,
__op_item_getter__,
__return_as__,
__try_except__,)
#2 LOCAL GLOBALS
__try__ = __try_except__.Unary
#2 PUBLIC INTERFACE
def Item(index): return __op_item_getter__(index)
def Sliced(*indices): return __op_attr_getter__(tuple(indices))
def Name(name,default=None): return __try__(__op_attr_getter__(name), __return_as__(default) )
|
StarcoderdataPython
|
1686974
|
<filename>main.py
import configparser
from twisted.internet.task import LoopingCall
from twisted.internet import reactor
from scrapy.crawler import CrawlerRunner
from websites.amazon import Amazon
from websites.ebay import Ebay
from websites.facebook import Facebook
from websites.kijiji import Kijiji
from websites.lespacs import Lespacs
def main():
    config = configparser.ConfigParser(allow_no_value=True)  # comment-only keys are written by create_config()
if(len(config.read('config.ini')) < 1):
create_config(config)
config.read('config.ini')
keywords = config['DEFAULT']['Keywords']
keywords = keywords.split(" ")
exclusions = config['DEFAULT']['Exclusions']
exclusions = exclusions.split(",")
max_price = config['DEFAULT']['MaxPrice']
min_price = config['DEFAULT']['MinPrice']
enable_facebook = config['DEFAULT'].getboolean('EnableFacebook')
enable_kijiji = config['DEFAULT'].getboolean('EnableKijiji')
enable_ebay = config['DEFAULT'].getboolean('EnableEbay')
enable_amazon = config['DEFAULT'].getboolean('EnableAmazon')
enable_lespacs = config['DEFAULT'].getboolean('EnableLespacs')
strictmode = config['DEFAULT'].getboolean('StrictMode')
facebook_city_id = config['DEFAULT']['FacebookCityId']
interval = config['DEFAULT']['Interval']
if(enable_facebook):
scrape(Facebook, keywords, exclusions,
max_price, min_price, interval, strictmode, facebook_city_id)
    # arguments follow the scrape() signature: keywords, exclusions, max_price, min_price, interval, strictmode
    if(enable_kijiji):
        scrape(Kijiji, keywords,
               exclusions, max_price, min_price, interval, strictmode)
    if(enable_ebay):
        scrape(Ebay, keywords, exclusions,
               max_price, min_price, interval, strictmode)
    if(enable_amazon):
        scrape(Amazon, keywords,
               exclusions, max_price, min_price, interval, strictmode)
    if(enable_lespacs):
        scrape(Lespacs, keywords,
               exclusions, max_price, min_price, interval, strictmode)
reactor.run()
process = CrawlerRunner(
settings={"FEEDS": {"hits.json": {"format": "json", "overwrite": False}, }, "USER_AGENT": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36)'}) # Kijiji has anti scraping method using the user agent
def scrape(spider, keywords: list, exclusions: list, max_price, min_price, interval, strictmode, facebook_city_id=None):
if(facebook_city_id):
# Facebook behaves differently with another user agent
process_for_facebook = CrawlerRunner(
settings={"FEEDS": {"hits.json": {"format": "json", "overwrite": False}}})
task = LoopingCall(lambda: process_for_facebook.crawl(
spider, keywords, exclusions, max_price, min_price, strictmode, facebook_city_id))
task.start(60 * int(interval))
else:
task = LoopingCall(lambda: process.crawl(
spider, keywords, exclusions, max_price, min_price, strictmode))
task.start(60 * int(interval))
def create_config(config):
config['DEFAULT'] = {
'Keywords': "airpods,pro",
'Exclusions': "case",
'MaxPrice': "100",
'MinPrice': "0",
"EnableFacebook": "True",
"EnableKijiji": "True",
"EnableEbay": "True",
"EnableAmazon": "True",
"EnableLespacs": "True",
"StrictMode": "True",
"FacebookCityId": "110941395597405"
}
config.set(
'DEFAULT', '; Facebook use the id of the closest city to you for the searches, if not set it will return no ads', None)
config.set('DEFAULT', 'Interval', "10")
config.set('DEFAULT', '; Every minutes the bot should scrape', None)
with open('config.ini', 'w') as configfile:
config.write(configfile)
main()
|
StarcoderdataPython
|
3307202
|
#!/usr/bin/env python3
# Copyright 2021 Battelle Energy Alliance, LLC
# Python std library imports
import argparse
import os
import pkg_resources
import pickle
import sys
from math import ceil
# package imports
from navv import utilities
from navv import spreadsheet_tools
from navv import _version
DATA_PATH = pkg_resources.resource_filename("navv", "data/")
MAX_ROWS = 1048574
def parse_args():
parser = argparse.ArgumentParser(
description=f"NAVV: Network Architecture Verification and Validation {_version.__version__}"
)
parser.add_argument("customer_name", help="Name of the customer")
parser.add_argument(
"-o",
"--output-dir",
help="Directory to place resultant analysis files in. Defaults to current working directory.",
default=os.getcwd(),
)
parser.add_argument(
"-p",
"--pcap",
help="Path to pcap file. NAVV requires zeek logs or pcap. If used, zeek will run on pcap to create new logs.",
)
parser.add_argument(
"-v",
"--version",
help="Display NAVV version",
dest="show_version",
action="store_true",
)
parser.add_argument(
"-z",
"--zeek-logs",
help="Path to store or contain zeek log files. Defaults to current working directory.",
default=os.getcwd()
)
parser.add_argument(
"-lf",
"--long_file_handling",
choices=['ask', 'truncate', 'split'],
dest="long_file_handling",
help="How to handle long files that may exceed Excel's row capabilities. Default is ask.",
default="ask",
)
return parser.parse_args()
@utilities.timeit
def main(args):
"""Main function for performing zeek-cut commands, sorting the output, and saving data."""
if args.show_version:
print(f"{_version.__version__}")
sys.exit(0)
out_dir = args.output_dir
with utilities.pushd(out_dir):
pass
file_name = os.path.join(out_dir, args.customer_name + "_network_analysis.xlsx")
wb = spreadsheet_tools.get_workbook(file_name)
services, conn_states = spreadsheet_tools.get_package_data()
timer_data = dict()
segments = spreadsheet_tools.get_segments_data(wb["Segments"])
inventory = spreadsheet_tools.get_inventory_data(wb["Inventory"])
zeek_logs_path = args.zeek_logs
if args.pcap:
utilities.run_zeek(os.path.abspath(args.pcap), zeek_logs_path, timer=timer_data)
else:
timer_data["run_zeek"] = "NOT RAN"
zeek_data = (
utilities.perform_zeekcut(
fields=[
"id.orig_h",
"id.resp_h",
"id.resp_p",
"proto",
"conn_state",
"orig_l2_addr",
"resp_l2_addr",
],
log_file=os.path.join(zeek_logs_path, "conn.log"),
)
.decode("utf-8")
.split("\n")[:-1]
)
# turn zeekcut data into rows for spreadsheet
print("Tool is starting analysis of data.")
rows, mac_dict = spreadsheet_tools.create_analysis_array(zeek_data, timer=timer_data)
analysis_pages=1
truncated_analysis = False
if len(rows) > MAX_ROWS:
if args.long_file_handling == "ask":
user_input = input(f"The number of rows needed exceeds Excel's row limitation of {MAX_ROWS}. Would you like to 'split' the data among {ceil(len(rows)/MAX_ROWS)} pages of the sheet, or 'truncate' the data to the top {MAX_ROWS} rows? (split not available yet) \nReply with 's' or 't': ")
elif args.long_file_handling == "truncate":
user_input = "t"
elif args.long_file_handling == "split":
user_input = "s"
if user_input == "t":
print("Truncating Data")
rows = rows[:MAX_ROWS]
truncated_analysis = True
elif user_input == "s":
print("Splitting analysis across multiple worksheets.")
analysis_pages = ceil(len(rows) / MAX_ROWS)
rows_in_pages = [rows[x:x+MAX_ROWS] for x in range(0,len(rows), MAX_ROWS)]
# get dns data for resolution
pkl_path = os.path.join(out_dir, "{}_dns_data.pkl".format(args.customer_name))
if os.path.exists(pkl_path):
with open(pkl_path, "rb") as pkl:
dns_filtered = pickle.load(pkl)
else:
dns_data = utilities.perform_zeekcut(
fields=["query", "answers", "qtype", "rcode_name"],
log_file=os.path.join(zeek_logs_path, "dns.log"),
)
dns_filtered = utilities.trim_dns_data(dns_data)
ext_IPs = set()
unk_int_IPs = set()
# Write Analysis to Analysis tab
spreadsheet_tools.perform_analysis(
wb,
rows_in_pages,
services,
conn_states,
inventory,
segments,
dns_filtered,
pkl_path,
ext_IPs,
unk_int_IPs,
truncated_option=truncated_analysis,
analysis_pages=analysis_pages,
timer=timer_data,
)
spreadsheet_tools.write_macs_sheet(mac_dict, wb, analysis_pages+3)
spreadsheet_tools.write_ext_int_sheet("External_IPs", ext_IPs, wb, analysis_pages+4, max_rows=MAX_ROWS)
spreadsheet_tools.write_ext_int_sheet("Internal_IPs", unk_int_IPs, wb, analysis_pages+5, max_rows=MAX_ROWS)
for tabs in range(1, analysis_pages+1):
if analysis_pages > 1:
name_of_sheet = f"Analysis({tabs})"
elif analysis_pages == 1:
if truncated_analysis == False:
name_of_sheet = "Analysis"
else:
name_of_sheet = "Analysis(Truncated)"
spreadsheet_tools.auto_adjust_width(wb[name_of_sheet])
times = (
utilities.perform_zeekcut(fields=["ts"], log_file=os.path.join(zeek_logs_path, "conn.log"))
.decode("utf-8")
.split("\n")[:-1]
)
forward = sorted(times)
start = float(forward[0])
end = float(forward[len(forward) - 1])
cap_time = end - start
timer_data["Length of Capture time"] = "{} day(s) {} hour(s) {} minutes {} seconds".format(
int(cap_time / 86400),
int(cap_time % 86400 / 3600),
int(cap_time % 3600 / 60),
int(cap_time % 60),
)
spreadsheet_tools.write_stats_sheet(wb, timer_data, analysis_pages+6)
spreadsheet_tools.write_conn_states_sheet(conn_states, wb, analysis_pages+7)
wb.save(file_name)
if __name__ == "__main__":
main(parse_args())
|
StarcoderdataPython
|
1762788
|
import re
import unittest
from scrubadub.filth import Filth, MergedFilth
from scrubadub.exceptions import InvalidReplaceWith, FilthMergeError
class FilthTestCase(unittest.TestCase):
def test_disallowed_replace_with(self):
"""replace_with should fail gracefully"""
filth = Filth()
with self.assertRaises(InvalidReplaceWith):
filth.replace_with('surrogate')
with self.assertRaises(InvalidReplaceWith):
filth.replace_with('something_invalid')
def test_nonoverlapping_filth(self):
"""can't merge non-overlapping filth"""
a_filth = Filth(beg=0, end=3, text="the")
b_filth = Filth(beg=4, end=7, text="end")
with self.assertRaises(FilthMergeError):
a_filth.merge(b_filth)
with self.assertRaises(FilthMergeError):
b_filth.merge(a_filth)
def test_text_merge(self):
"""make sure text length is correct"""
class SomeFilth(Filth):
type = 'something'
text = "the end"
a_filth = SomeFilth(beg=0, end=3, text=text[:3])
b_filth = SomeFilth(beg=1, end=7, text=text[1:])
c_filth = a_filth.merge(b_filth)
self.assertEqual(c_filth.text, text)
c_filth = b_filth.merge(a_filth)
self.assertEqual(c_filth.text, text)
d_filth = c_filth.merge(a_filth)
self.assertEqual(d_filth.text, text)
b_filth.end = 2
with self.assertRaises(FilthMergeError):
b_filth.merge(a_filth)
def test_invalid_merge_documents(self):
"""Ensure Filth in two different documents cant be merged"""
filth_a = Filth(0, 2, text='aa', document_name='one')
filth_b = Filth(1, 2, text='a', document_name='two')
with self.assertRaises(FilthMergeError):
filth_a.merge(filth_b)
with self.assertRaises(FilthMergeError):
filth_b.merge(filth_a)
def test_filth_string(self):
"""Test the Filth to string function"""
filth = Filth(beg=0, end=5)
self.assertEqual(str(filth), "<Filth text='' beg=0 end=5>")
filth = Filth(beg=0, end=5)
self.assertEqual(filth.__repr__(), "<Filth text='' beg=0 end=5>")
filth = Filth(beg=0, end=5)
self.assertEqual(filth._to_string(), "<Filth text='' beg=0 end=5>")
filth = Filth(beg=0, end=5, text='hello')
self.assertEqual(str(filth), "<Filth text='hello' beg=0 end=5>")
filth = Filth(beg=0, end=5, text='hello', document_name='hello.txt')
self.assertEqual(str(filth), "<Filth text='hello' document_name='hello.txt' beg=0 end=5>")
filth = Filth(beg=0, end=5, text='hello', document_name='hello.txt')
self.assertEqual(filth._to_string(attributes=['text']), "<Filth text='hello'>")
self.assertEqual(filth._to_string(attributes=['beg', 'end', 'text']), "<Filth beg=0 end=5 text='hello'>")
self.assertEqual(
filth._to_string(attributes=['text', 'document_name']),
"<Filth text='hello' document_name='hello.txt'>"
)
def test_merged_to_string(self):
"""Test the MergedFilth to string"""
class TestFilth(Filth):
type = 'test_filth'
merged = MergedFilth(TestFilth(0, 2, 'ab'), Filth(1, 2, 'b'))
self.assertEqual(merged.__repr__(), "<MergedFilth filths=[<TestFilth text='ab' beg=0 end=2>, <Filth text='b' beg=1 end=2>]>")
def test_equality(self):
"""Test the filth equality function"""
self.assertTrue(
Filth(beg=0, end=5, text='hello') ==
Filth(beg=0, end=5, text='hello')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello') ==
Filth(beg=0, end=5, text='hello', match=re.match('123', '1234'))
)
self.assertTrue(
Filth(beg=0, end=5, text='hello') !=
Filth(beg=1, end=5, text='hello')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello') !=
Filth(beg=0, end=6, text='hello')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello') !=
Filth(beg=0, end=5, text='hellou')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello', document_name='test') ==
Filth(beg=0, end=5, text='hello', document_name='test')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello') !=
Filth(beg=0, end=5, text='hello', document_name='test')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello', document_name='test') !=
Filth(beg=0, end=5, text='hello')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello', document_name='test') !=
Filth(beg=0, end=5, text='hello', document_name='another_test')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello', detector_name='tester') ==
Filth(beg=0, end=5, text='hello', detector_name='tester')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello', detector_name='tester') !=
Filth(beg=0, end=5, text='hello', detector_name='another_tester')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello', detector_name='tester') !=
Filth(beg=0, end=5, text='hello')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello') !=
Filth(beg=0, end=5, text='hello', detector_name='tester')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello', document_name='test', detector_name='tester') ==
Filth(beg=0, end=5, text='hello', document_name='test', detector_name='tester')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello', document_name='test', detector_name='tester') !=
Filth(beg=0, end=5, text='hello', document_name='test', detector_name='another_tester')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello', document_name='test', detector_name='tester') !=
Filth(beg=0, end=5, text='hello', document_name='another_test', detector_name='tester')
)
|
StarcoderdataPython
|
1613499
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
from flask import jsonify
from sqlalchemy.orm.exc import NoResultFound
from flexget.api import api, APIResource
from flexget.plugins.filter.retry_failed import FailedEntry
log = logging.getLogger('failed_api')
retry_failed_api = api.namespace('failed', description='View and manage failed entries')
empty_response = api.schema('empty', {'type': 'object'})
retry_failed_entry_object = {
'type': 'object',
'properties': {
'id': {'type': 'integer'},
'title': {'type': 'string'},
'url': {'type': 'string'},
'added_at': {'type': 'string', 'format': 'date-time'},
'reason': {'type': 'string'},
'count': {'type': 'integer'},
'retry_time': {'type': 'string', 'format': 'date-time'}
}
}
retry_entries_list_object = {
'type': 'object',
'properties': {
'failed_entries': {'type': 'array', 'items': retry_failed_entry_object},
'number_of_failed_entries': {'type': 'integer'}
}
}
retry_failed_entry_schema = api.schema('retry_failed_entry_schema', retry_failed_entry_object)
retry_entries_list_schema = api.schema('retry_entries_list_schema', retry_entries_list_object)
@retry_failed_api.route('/')
class RetryFailed(APIResource):
@api.response(200, model=retry_entries_list_schema)
def get(self, session=None):
""" List all failed entries """
failed_entries = [failed.to_dict() for failed in session.query(FailedEntry).all()]
return jsonify({
'failed_entries': failed_entries,
'number_of_failed_entries': len(failed_entries)
})
@api.response(200, 'success', model=empty_response)
def delete(self, session=None):
""" Clear all failed entries """
log.debug('deleting all failed entries')
session.query(FailedEntry).delete()
return {}
@retry_failed_api.route('/<int:failed_entry_id>/')
@api.response(404, 'No failed entry found')
class RetryFailed(APIResource):
@api.doc(params={'failed_entry_id': 'ID of the failed entry'})
@api.response(200, model=retry_failed_entry_schema)
def get(self, failed_entry_id, session=None):
""" Get failed entry by ID """
try:
failed_entry = session.query(FailedEntry).filter(FailedEntry.id == failed_entry_id).one()
except NoResultFound:
return {'status': 'error',
'message': 'could not find entry with ID %i' % failed_entry_id}, 404
return jsonify(failed_entry.to_dict())
@api.response(200, 'success', model=empty_response)
def delete(self, failed_entry_id, session=None):
""" Delete failed entry by ID """
try:
failed_entry = session.query(FailedEntry).filter(FailedEntry.id == failed_entry_id).one()
except NoResultFound:
return {'status': 'error',
'message': 'could not find entry with ID %i' % failed_entry_id}, 404
log.debug('deleting failed entry: "%s"' % failed_entry.title)
session.delete(failed_entry)
return {}
|
StarcoderdataPython
|
134830
|
from typing import List
import numpy as np
import segmentation_models_pytorch as smp
from segmentation_models_pytorch.base.modules import Activation
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import datasets
from torchvision.transforms import transforms
from baal import ActiveLearningDataset
pascal_voc_ids = np.array([
[0, 0, 0],
[128, 0, 0],
[0, 128, 0],
[128, 128, 0],
[0, 0, 128],
[128, 0, 128],
[0, 128, 128],
[128, 128, 128],
[64, 0, 0],
[192, 0, 0],
[64, 128, 0],
[192, 128, 0],
[64, 0, 128],
[192, 0, 128],
[64, 128, 128],
[192, 128, 128],
[0, 64, 0],
[128, 64, 0],
[0, 192, 0],
[128, 192, 0],
[0, 64, 128],
])
def active_pascal(
path="/tmp",
*args,
transform=transforms.ToTensor(),
test_transform=transforms.ToTensor(),
**kwargs,
):
"""Get active Pascal-VOC 2102 datasets.
Arguments:
path : str
The root folder for the Pascal dataset
Returns:
ActiveLearningDataset
the active learning dataset, training data
Dataset
the evaluation dataset
"""
return (
ActiveLearningDataset(datasets.VOCSegmentation(
path, image_set='train', transform=transform, download=False, *args, **kwargs
)),
datasets.VOCSegmentation(path, image_set='val', transform=test_transform, download=False,
*args, **kwargs),
)
class SegmentationHead(nn.Sequential):
def __init__(self, in_channels, out_channels, kernel_size=3, activation=None, upsampling=1):
dropout = nn.Dropout2d(0.5)
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
padding=kernel_size // 2)
upsampling = nn.UpsamplingBilinear2d(
scale_factor=upsampling) if upsampling > 1 else nn.Identity()
activation = Activation(activation)
super().__init__(dropout, conv2d, upsampling, activation)
def add_dropout(model: smp.Unet, decoder_channels: List[int] = (256, 128, 64, 32, 16),
classes=1, activation=None):
seg_head = SegmentationHead(
in_channels=decoder_channels[-1],
out_channels=classes,
activation=activation,
kernel_size=3,
)
model.add_module('segmentation_head', seg_head)
model.initialize()
class FocalLoss(nn.Module):
"""
References:
Author: clcarwin
Site https://github.com/clcarwin/focal_loss_pytorch/blob/master/focalloss.py
"""
def __init__(self, gamma=0, alpha=None, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
if isinstance(alpha, (float, int)): self.alpha = torch.Tensor([alpha, 1 - alpha])
if isinstance(alpha, list): self.alpha = torch.Tensor(alpha)
self.size_average = size_average
def forward(self, input, target):
if input.dim() > 2:
input = input.view(input.size(0), input.size(1), -1) # N,C,H,W => N,C,H*W
input = input.transpose(1, 2) # N,C,H*W => N,H*W,C
input = input.contiguous().view(-1, input.size(2)) # N,H*W,C => N*H*W,C
target = target.view(-1, 1)
logpt = F.log_softmax(input, dim=1)
logpt = logpt.gather(1, target)
logpt = logpt.view(-1)
pt = logpt.data.exp()
if self.alpha is not None:
if self.alpha.type() != input.data.type():
self.alpha = self.alpha.type_as(input.data)
select = (target != 0).type(torch.LongTensor).to(self.alpha.device)
at = self.alpha.gather(0, select.data.view(-1))
logpt = logpt * at
loss = -1 * (1 - pt) ** self.gamma * logpt
if self.size_average:
return loss.mean()
else:
return loss.sum()
|
StarcoderdataPython
|
1777534
|
# -*- coding: utf-8 -*-
"""
:author: T8840
:tag: Thinking is a good thing!
纸上得来终觉浅,绝知此事要躬行!
:description: 用来统计埋点数据出现个数
"""
from pprint import pprint
def count(f):
Count = {}
with open(f,'r',encoding='utf-8') as file:
for line in file.readlines():
            Count[line] = Count.get(line, 0) + 1  # default 0 so the first occurrence counts as 1
pprint(Count)
count('./upload_record.txt')
count('./upload_record_type.txt')
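# Note (illustrative): collections.Counter from the standard library gives the same per-line counts, e.g.
#     from collections import Counter
#     with open('./upload_record.txt', encoding='utf-8') as f:
#         pprint(Counter(f.readlines()))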
|
StarcoderdataPython
|
1629074
|
import random
from typing import Any, List, Optional
import numpy as np
import numpy.typing as npt
import pytorch_lightning as pl
import torch
import torch.utils.data
from nuplan.planning.training.modeling.types import FeaturesType, TargetsType, move_features_type_to_device
from nuplan.planning.training.preprocessing.feature_collate import FeatureCollate
from nuplan.planning.training.preprocessing.features.raster import Raster
from nuplan.planning.training.preprocessing.features.trajectory import Trajectory
from nuplan.planning.training.visualization.raster_visualization import get_raster_with_trajectories_as_rgb
class RasterVisualizationCallback(pl.Callback):
"""
Callbacks that visualizes model input raster and logs them in Tensorboard.
"""
def __init__(
self,
images_per_tile: int,
num_train_tiles: int,
num_val_tiles: int,
pixel_size: float,
):
"""
Initializes the class.
:param images_per_tile: number of images per tiles to visualize
:param num_train_tiles: number of tiles from the training set
:param num_val_tiles: number of tiles from the validation set
:param pixel_size: [m] size of pixel in meters
"""
super().__init__()
self.custom_batch_size = images_per_tile
self.num_train_images = num_train_tiles * images_per_tile
self.num_val_images = num_val_tiles * images_per_tile
self.pixel_size = pixel_size
self.train_dataloader: Optional[torch.utils.data.DataLoader] = None
self.val_dataloader: Optional[torch.utils.data.DataLoader] = None
def _initialize_dataloaders(self, datamodule: pl.LightningDataModule) -> None:
"""
Initializes the dataloaders. This makes sure that the same examples are sampled
every time for comparison during visualization.
:param datamodule: lightning datamodule
"""
train_set = datamodule.train_dataloader().dataset # type: ignore
val_set = datamodule.val_dataloader().dataset # type: ignore
self.train_dataloader = self._create_dataloader(train_set, self.num_train_images)
self.val_dataloader = self._create_dataloader(val_set, self.num_val_images)
def _create_dataloader(self, dataset: torch.utils.data.Dataset, num_samples: int) -> torch.utils.data.DataLoader:
dataset_size = len(dataset)
num_keep = min(dataset_size, num_samples)
sampled_idxs = random.sample(range(dataset_size), num_keep)
subset = torch.utils.data.Subset(dataset=dataset, indices=sampled_idxs)
return torch.utils.data.DataLoader(dataset=subset, batch_size=self.custom_batch_size,
collate_fn=FeatureCollate())
def _log_from_dataloader(
self,
pl_module: pl.LightningModule,
dataloader: torch.utils.data.DataLoader,
loggers: List[Any],
training_step: int,
prefix: str,
) -> None:
"""
Visualizes and logs all examples from the input dataloader.
:param pl_module: lightning module used for inference
:param dataloader: torch dataloader
:param loggers: list of loggers from the trainer
:param training_step: global step in training
:param prefix: prefix to add to the log tag
"""
for batch_idx, batch in enumerate(dataloader):
features: FeaturesType = batch[0]
targets: TargetsType = batch[1]
predictions = self._infer_model(pl_module, move_features_type_to_device(features, pl_module.device))
self._log_batch(loggers, features, targets, predictions, batch_idx, training_step, prefix)
def _log_batch(
self,
loggers: List[Any],
features: FeaturesType,
targets: TargetsType,
predictions: TargetsType,
batch_idx: int,
training_step: int,
prefix: str,
) -> None:
"""
Visualizes and logs a batch of data (features, targets, predictions) from the model.
:param loggers: list of loggers from the trainer
:param features: tensor of model features
:param targets: tensor of model targets
:param predictions: tensor of model predictions
:param batch_idx: index of total batches to visualize
:param training_step: global trainign step
:param prefix: prefix to add to the log tag
"""
if 'trajectory' not in targets and 'trajectory' not in predictions:
return
if 'raster' in features:
image_batch = self._get_raster_images_from_batch(
features['raster'], targets['trajectory'], predictions['trajectory'])
else:
return
tag = f'{prefix}_visualization_{batch_idx}'
for logger in loggers:
if isinstance(logger, torch.utils.tensorboard.writer.SummaryWriter):
logger.add_images(
tag=tag,
img_tensor=torch.from_numpy(image_batch),
global_step=training_step,
dataformats='NHWC',
)
def _get_raster_images_from_batch(self, features: Raster, targets: Trajectory, predictions: Trajectory) \
-> npt.NDArray[np.float32]:
"""
Creates a list of RGB raster images from a batch of model data.
:param features: tensor of model features
:param targets: tensor of model targets
:param predictions: tensor of model predictions
:return: list of raster images
"""
images = list()
for feature, target, prediction in zip(features.data, targets.data, predictions.data):
raster = Raster.from_feature_tensor(feature)
target_trajectory = Trajectory(target)
predicted_trajectory = Trajectory(prediction)
image = get_raster_with_trajectories_as_rgb(
self.pixel_size,
raster,
target_trajectory,
predicted_trajectory,
)
images.append(image)
return np.asarray(images)
def _infer_model(self, pl_module: pl.LightningModule, features: FeaturesType) -> TargetsType:
"""
Makes an inference of the input batch features given a model.
:param pl_module: lightning model
:param features: model inputs
:return: model predictions
"""
with torch.no_grad():
pl_module.eval()
predictions = move_features_type_to_device(pl_module(features), torch.device('cpu'))
pl_module.train()
return predictions
def on_train_epoch_end(
self,
trainer: pl.Trainer,
pl_module: pl.LightningModule,
unused: Optional = None, # type: ignore
) -> None:
"""
Visualizes and logs training examples at the end of the epoch.
:param trainer: lightning trainer
:param pl_module: lightning module
"""
assert hasattr(trainer, 'datamodule'), "Trainer missing datamodule attribute"
assert hasattr(trainer, 'global_step'), "Trainer missing global_step attribute"
if self.train_dataloader is None:
self._initialize_dataloaders(trainer.datamodule) # type: ignore
self._log_from_dataloader(
pl_module,
self.train_dataloader,
trainer.logger.experiment,
trainer.global_step, # type: ignore
'train',
)
def on_validation_epoch_end(
self,
trainer: pl.Trainer,
pl_module: pl.LightningModule,
unused: Optional = None, # type: ignore
) -> None:
"""
Visualizes and logs validation examples at the end of the epoch.
:param trainer: lightning trainer
:param pl_module: lightning module
"""
assert hasattr(trainer, 'datamodule'), "Trainer missing datamodule attribute"
assert hasattr(trainer, 'global_step'), "Trainer missing global_step attribute"
if self.val_dataloader is None:
self._initialize_dataloaders(trainer.datamodule) # type: ignore
self._log_from_dataloader(
pl_module,
self.val_dataloader,
trainer.logger.experiment,
trainer.global_step, # type: ignore
'val',
)
|
StarcoderdataPython
|
193855
|
import datetime
import pathlib
import unittest
from signify.authenticode import TRUSTED_CERTIFICATE_STORE, TRUSTED_CERTIFICATE_STORE_NO_CTL
from signify.certificates import Certificate
from signify.context import VerificationContext, FileSystemCertificateStore
from signify.exceptions import VerificationError
from signify.signed_pe import SignedPEFile
root_dir = pathlib.Path(__file__).parent
class TrustedStoreTestCase(unittest.TestCase):
def test_amount_of_certificates(self):
self.assertGreaterEqual(len(TRUSTED_CERTIFICATE_STORE), 40)
class ContextTestCase(unittest.TestCase):
def test_potential_chains(self):
with open(str(root_dir / "test_data" / "19e818d0da361c4feedd456fca63d68d4b024fbbd3d9265f606076c7ee72e8f8.ViR"), "rb") as f:
pefile = SignedPEFile(f)
for signed_data in pefile.signed_datas:
context = VerificationContext(TRUSTED_CERTIFICATE_STORE_NO_CTL, signed_data.certificates)
potential_chains = list(signed_data.signer_info.potential_chains(context))
self.assertEqual(len(potential_chains), 2)
# for chain in potential_chains:
# print("xxxx")
# for cert in chain:
# print(cert)
class ValidationTestCase(unittest.TestCase):
def test_revoked_certificate(self):
root = FileSystemCertificateStore(root_dir / "certs" / 'digicert-global-root-ca.pem', trusted=True)
intermediate = FileSystemCertificateStore(root_dir / "certs" / 'digicert-sha2-secure-server-ca.pem')
with open(str(root_dir / "certs" / 'revoked.badssl.com.pem'), "rb") as f:
cert = Certificate.from_pem(f.read())
# check that when we do not verify the CRL it does not fail
context = VerificationContext(root, intermediate)
context.verify(cert)
context = VerificationContext(root, intermediate, allow_fetching=True, revocation_mode='hard-fail')
with self.assertRaises(VerificationError):
context.verify(cert)
|
StarcoderdataPython
|
45878
|
<gh_stars>10-100
from django.conf.urls import patterns, url
from rest_framework import routers
from accounts.api import UserViewSet, LostKeyViewSet, AuthView, MemberViewSet
from nodes.api import NodeViewSet, NodePathView, NodeDataView, PolicyViewSet
from news.api import NewsApiView
from search.api import SearchView
from vaultier.views import ConfigView
# todo: move
from workspaces.api import WorkspaceKeyViewSet, InvitationViewSet
from vaultier.api import ServerTimeView
router = routers.DefaultRouter()
router.register(r'users', UserViewSet, base_name='user')
router.register(r'nodes', NodeViewSet, base_name='node')
router.register(r'workspace_keys', WorkspaceKeyViewSet,
base_name='workspace_key')
router.register(r'members', MemberViewSet, base_name='member')
router.register(r'invitations', InvitationViewSet, base_name='invitation')
router.register(r'roles', PolicyViewSet, base_name='role')
router.register(r'lost_keys', LostKeyViewSet, base_name='lost_keys')
urlpatterns = router.urls
urlpatterns += patterns(
'',
url(r'^config/', ConfigView.as_view(), name='config'),
# node path
url(r'^nodes/(?P<pk>\d+)/path/$', NodePathView.as_view(),
name='node-path'),
# node data
url(r'^nodes/(?P<pk>\d+)/data/$', NodeDataView.as_view(),
name='node-data'),
# server time
url(r'^server-time/$', ServerTimeView.as_view(),
name='server_time'),
# news
url(r'^news/$', NewsApiView.as_view(), name='news-list'),
# search
url(r'^search/search$', SearchView.as_view(), name='search-search'),
# auth
url(r'^auth/auth$', AuthView.as_view(), name='auth-auth'),
url(r'^auth/user$', UserViewSet.as_view(), name='auth-user'),
)
|
StarcoderdataPython
|
3202024
|
"""
Pre-processing Functions
"""
import cv2
import torch
import numpy as np
import torch.nn.functional as F
from torchvision.transforms import ToTensor
def resize(image, size):
"""Resize images"""
image = F.interpolate(image.unsqueeze(0), size=size, mode="nearest").squeeze(0)
return image
def pad_to_square(img, pad_value):
c, h, w = img.shape
dim_diff = np.abs(h - w)
# (upper / left) padding and (lower / right) padding
pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2
# Determine padding
pad = (0, 0, pad1, pad2) if h <= w else (pad1, pad2, 0, 0)
# Add padding
img = F.pad(img, pad, "constant", value=pad_value)
return img, pad
def lane_prx2(im, im_h=360, im_w=640):
""" pre-processing images for lane detector
:param im: ndarray
an image or a frame on which lanes are shown
:param im_h: int
re-scaled image's height for the detector
:param im_w: int
re-scaled image's width for the detector
:return:
proc_im: tensor with a shape (1, n_channel, im_h, im_w)
processed image
"""
# to_tensor = ToTensor()
im_rz = cv2.resize(im, (im_w, im_h)) / 255
proc_im = ToTensor()(im_rz.astype(np.float32))
proc_im = torch.unsqueeze(proc_im, 0)
return proc_im
def car_prx2(im, im_size):
""" pre-processing images for car detector
:param im: ndarray
an image or a frame on which lanes are shown
:param im_size: int
re-scaled image's size for the detector
:return:
proc_im: tensor with a shape (1, n_channel, im_size, im_size)
processed image
"""
# Convert to Pytorch tensor
proc_im = ToTensor()(im)
# Pad to square resolution
proc_im, _ = pad_to_square(proc_im, 0)
# Resize
proc_im = resize(proc_im, im_size)
proc_im = torch.unsqueeze(proc_im, 0)
return proc_im
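# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# The image path below is a placeholder; both helpers expect a BGR frame as read by cv2.
if __name__ == "__main__":
    frame = cv2.imread("example_frame.jpg")
    if frame is not None:
        lane_input = lane_prx2(frame)              # tensor of shape (1, 3, 360, 640)
        car_input = car_prx2(frame, im_size=416)   # tensor of shape (1, 3, 416, 416)
        print(lane_input.shape, car_input.shape)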
|
StarcoderdataPython
|
50327
|
<filename>Data-Structures/Arrays & LinkedLists/SinglyLinkedList.py
class Node:
def __init__(self, value):
self.value = value
self.next = None
class LinkedList:
"""
Class to create a linked list and perform some basic operations such as:
append, display, prepend, convert, insert, remove, search, pop
"""
def __init__(self, value=None):
self.head = value
def append(self, value):
if self.head is None:
self.head = Node(value)
return
node = self.head
while node.next is not None:
node = node.next
node.next = Node(value)
return
def display(self):
node = self.head
while node:
print(node.value)
node = node.next
def convert_list(self):
Pylist = []
node = self.head
while node:
Pylist.append(node.value)
node = node.next
return Pylist
def prepend(self, value):
new_node = Node(value)
new_node.next = self.head
self.head = new_node
def search(self, value):
node = self.head
while node:
if node.value == value:
return node
node = node.next
return None
def remove(self, value):
node = self.head
#If head contains the value, change the head
if node is not None:
if node.value == value:
self.head = node.next
node = None
return
# Keep track of the previous node of the node that contains the value
while node:
if node.value == value:
break
prev_node = node
node = node.next
if node == None:
return
#Link the previous node to the next node
prev_node.next = node.next
node = None
def pop(self):
node = self.head
value = node.value
self.head = node.next
# node = None
return value
def insert(self, value, pos):
node = self.head
new_node = Node(value)
idx = 0
prev_node = None
while node:
if idx == pos:
break
prev_node = node
node = node.next
idx+=1
if prev_node == None:
new_node.next = self.head
self.head = new_node
return
new_node.next = prev_node.next
prev_node.next = new_node
return
LinkedList.insert = insert
LinkedList.prepend = prepend
LinkedList.pop = pop
LinkedList.remove = remove
LinkedList.search = search
def create_linked_list(_list_):
"""
The function converts a python list/array into a linked list; Time complexity: O(n)
:param _list_: a Python list
:return: A linked list
"""
head = None
tail = None
for val in _list_:
if head is None:
head = Node(val)
tail = head
else:
tail.next = Node(val)
tail = tail.next
return head
def reverse(linked_list):
"""
Reverse a linked list
:param linked_list: linked_list
:return: reversed linked list
"""
    new_list = LinkedList()
    prev_node = None
    node = linked_list.head
    while node:
        new_node = Node(node.value)
        new_node.next = prev_node
        prev_node = new_node
        node = node.next
    new_list.head = prev_node
return new_list
def isCircular(linked_list):
"""
Checks if a linked list is circular i.e., has loops
:param linked_list: Linked list
:return: True/False
"""
if linked_list.head is None:
return False
fast = linked_list.head
slow = linked_list.head
while fast and fast.next:
fast = fast.next.next
slow = slow.next
if fast == slow:
return True
return False
if __name__ == "__main__":
llist = LinkedList()
llist.append(2)
llist.append(-1)
llist.append(10)
llist.append(1)
_list_ = llist.convert_list()
print("Linked List: ")
llist.display()
print("Python List: ", _list_)
    llist.prepend(8)
    print("Prepended Linked List")
llist.display()
node = llist.search(-1)
print("Value searched: ", node.value, " Next Node: ", node.next.value)
rm = 10
llist.remove(rm)
print("Linked List after removal {}: ".format(rm))
llist.display()
Pop = llist.pop()
print("Element Popped: {}, Value of the head now: {}, linked list after popping element: ".format(Pop, llist.head.value))
llist.display()
print("Insertion: ")
llist.insert(3, 2)
llist.display()
reverse_list = reverse(llist)
reverse_list.display()
|
StarcoderdataPython
|
4823091
|
class StandardClassifier_1D:
def __init__(self, model_name, **model_params):
self.model_name = model_name
self.model = None
if self.model_name == 'KNN':
from sklearn.neighbors import KNeighborsClassifier
self.model = KNeighborsClassifier(**model_params)
elif self.model_name == 'GaussianNB':
from sklearn.naive_bayes import GaussianNB
self.model = GaussianNB(**model_params)
elif self.model_name == 'LinearSVC':
from sklearn.svm import LinearSVC
self.model = LinearSVC(**model_params)
elif self.model_name == 'RBF SVC':
from sklearn.svm import SVC
self.model = SVC(**model_params)
elif self.model_name == 'DecisionTree':
from sklearn.tree import DecisionTreeClassifier
self.model = DecisionTreeClassifier(**model_params)
elif self.model_name == 'RandomForest':
from sklearn.ensemble import RandomForestClassifier
self.model = RandomForestClassifier(**model_params)
elif self.model_name == 'GradientBoost':
from sklearn.ensemble import GradientBoostingClassifier
self.model = GradientBoostingClassifier(**model_params)
elif self.model_name == 'AdaBoost':
from sklearn.ensemble import AdaBoostClassifier
self.model = AdaBoostClassifier(**model_params)
elif self.model_name == 'XGBoost':
from xgboost import XGBClassifier
self.model = XGBClassifier(**model_params)
elif (self.model_name == 'Stratified' or self.model_name == 'MostFrequent' or
self.model_name == 'Prior' or self.model_name == 'Uniform'):
from sklearn.dummy import DummyClassifier
self.model = DummyClassifier(**model_params)
def fit(self, trainX, trainY, validX, validY):
trainX = trainX.reshape((-1, 258)) # TODO: A hack for now
print(self.model)
self.model.fit(trainX, trainY.ravel())
def predict(self, testX):
testX = testX.reshape((-1, 258)) # TODO: A hack for now
return self.model.predict(testX)
def save(self, path):
# save the model to disk
import pickle
filename = path + self.model_name + '.sav'
pickle.dump(self.model, open(filename, 'wb'))
def load(self, path):
# Test
# load the model from disk
import pickle
filename = path + self.model_name + '.sav'
self.model = pickle.load(open(filename, 'rb'))
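# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Synthetic data only; fit/predict flatten each sample to the 258 features the class expects.
if __name__ == "__main__":
    import numpy as np
    X = np.random.randn(100, 258).astype(np.float32)
    y = (np.random.rand(100) > 0.5).astype(int)
    clf = StandardClassifier_1D('RandomForest', n_estimators=10)
    clf.fit(X[:80], y[:80], X[80:], y[80:])
    print(clf.predict(X[80:])[:10])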
|
StarcoderdataPython
|
3295438
|
"""
Vectorize text field
"""
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
class VectorizeText(BaseEstimator, TransformerMixin):
"""
Class for altering sklearn.feature_extraction.text.TfidfVectorizer
so that its transform method will return a pandas.DataFrame
"""
def __init__(self, vectorizer = TfidfVectorizer(max_features=100), params : dict = None):
self.vectorizer = vectorizer
if params:
self.vectorizer.set_params(**params)
def fit(self, X, y = None):
"""
Fit method
Parameters
----------
X : pandas.Series
y : array_like
Returns
-------
self
"""
self.vectorizer = self.vectorizer.fit(X, y)
return self
def transform(self, X):
"""
Transform method
Parameters
----------
X : pandas.Series
Returns
-------
res_df : pandas.DataFrame
"""
col = X.name
res = self.vectorizer.transform(X)
res_df = pd.DataFrame(
res.todense(),
columns = [col + "_" + i for i in self.vectorizer.get_feature_names()])
return res_df
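# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Assumes an sklearn version where TfidfVectorizer.get_feature_names is still available.
if __name__ == "__main__":
    texts = pd.Series(["the cat sat", "the dog barked", "a cat and a dog"], name="text")
    vec = VectorizeText(params={"max_features": 5})
    features = vec.fit(texts).transform(texts)
    print(features)   # columns are prefixed with the Series name, e.g. "text_cat"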
|
StarcoderdataPython
|
1667240
|
<reponame>nuzcraft/RLTut<filename>helpers/target_monster.py
# function to target a specific monster
from helpers.target_tile import target_tile
import variables as var
def target_monster(max_range = None):
    # returns a clicked monster inside FOV up to a range, or None if right-clicked
while True:
(x, y) = target_tile(max_range)
if x is None: # player cancelled
return None
# return the first clicked monster, otherwise continue looping
for ent in var.entities:
if ent.x == x and ent.y == y and ent.fighter and ent != var.player:
return ent
|
StarcoderdataPython
|
3379889
|
# -*- coding: utf-8 -*-
# standard libraries
import csv
import logging
from pathlib import Path
from typing import List
# third-party libraries
from bs4 import BeautifulSoup
import pandas as pd
import pendulum
# my libraries
import src.helpers as hlp
logger = logging.getLogger(__name__)
def filter_download_contents(contents):
"""
    Remove characters from the downloaded contents that could break the CSV format.
"""
filtered_contents = contents
filtered_contents = filtered_contents.replace('\r', '')
filtered_contents = filtered_contents.replace('\n', '')
filtered_contents = filtered_contents.replace('\t', '')
return filtered_contents
def build_journal_record(url):
"""
    Download a journal page and build a record dict with its metadata and filtered contents.
"""
# download journal
logger.info(f'Downloading {url}')
resp: dict = hlp.download_url(url)
contents_raw = resp['contents'].decode('utf-8')
# filters
# remove characters in contents that may break CSV format
contents = filter_download_contents(contents_raw)
# extract important info about journal
record = {}
# journal download
record = hlp.parse_download_response(resp)
record['data'] = contents
# session designation
session: str = record['filename'].split('DAY')[0].split('/')[-1]
record['session'] = session
# extract date and legislative day
soup = BeautifulSoup(contents, "html.parser")
html_title = soup.title.string
# supplements are a special case
if 'SUPPLEMENT —' in html_title:
date_raw, lege_day_raw = html_title.split('SUPPLEMENT —')
elif 'SUPPLEMENT –– ' in html_title:
date_raw, lege_day_raw = html_title.split('SUPPLEMENT ––')
else:
date_raw, lege_day_raw = html_title.split('—')
date = pendulum.parse(date_raw, strict=False).date()
record['date'] = date
lege_day = ''.join(ch for ch in lege_day_raw if ch.isdigit())
record['legislative_day'] = lege_day
return record
def acquire(**kwargs):
"""
acquire journal data in raw format
"""
# configuration/input
input_filepath = kwargs['input_filepath']
output_filepath = kwargs['output_filepath']
field_names = tuple(kwargs['field_names'])
logger.info('Reading URL file for journals.')
    targets: str = Path(input_filepath).read_text()
    urls: List[str] = targets.splitlines()
logger.info('Creating journal CSV file.')
# create parent directories for output file
hlp.create_parent_directories(output_filepath)
dest = Path(output_filepath)
with dest.open(mode='w', encoding=hlp.ENCODING, newline='') as fp:
writer = csv.DictWriter(fp,
fieldnames=field_names,
restval='',
delimiter=hlp.DELIMITER,
doublequote=hlp.DOUBLEQUOTE,
escapechar=hlp.ESCAPE_CHAR,
lineterminator=hlp.LINE_TERMINATOR,
quotechar=hlp.QUOTE_CHAR,
quoting=hlp.QUOTING)
writer.writeheader()
logger.info('Downloading journal URLs.')
for url in urls:
record = build_journal_record(url)
writer.writerow(record)
logger.info('Done.')
def filter_data(data):
"""
run data through filters
"""
clean_data = data
# handle spanish chars
clean_data = clean_data.replace('á', 'á')
clean_data = clean_data.replace('ñ', 'ñ')
# needed for picking up record votes for measures; the measures are
# surrounded by bold tags and it messes up the algorithm
clean_data = clean_data.replace('<b>', '')
clean_data = clean_data.replace('</b>', '')
return clean_data
def clean_record(record):
"""
clean each journal record
"""
typ = record['type']
location = record['location']
retrieval_datetime = record['retrieval_datetime']
filename = record['filename']
session = record['session']
date = record['date']
legislative_day = record['legislative_day']
raw_data: str = record['data']
# filter data
data = filter_data(raw_data)
# create clean record
clean_record = {}
clean_record['typ'] = typ
clean_record['location'] = location
clean_record['retrieval_datetime'] = retrieval_datetime
clean_record['filename'] = filename
clean_record['session'] = session
clean_record['date'] = date
clean_record['legislative_day'] = legislative_day
clean_record['data'] = data
# convert dict record to a Series
clean_series = pd.Series(clean_record)
return clean_series
def clean(**kwargs):
"""
clean journal records
"""
# configuration/input
input_filepath = kwargs['input_filepath']
output_filepath = kwargs['output_filepath']
df_journals = hlp.read_tsv(input_filepath)
clean_data = df_journals.apply(clean_record, axis=1)
# create parent directories for output file
hlp.create_parent_directories(output_filepath)
# write output file
df_clean = pd.DataFrame(clean_data)
hlp.write_tsv(df_clean, output_filepath)
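# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# The file paths are placeholders and field_names is an assumption inferred from the
# keys handled in build_journal_record/clean_record:
#
#   acquire(
#       input_filepath='data/raw/journal_urls.txt',
#       output_filepath='data/raw/journals.tsv',
#       field_names=['type', 'location', 'retrieval_datetime', 'filename',
#                    'session', 'date', 'legislative_day', 'data'],
#   )
#   clean(input_filepath='data/raw/journals.tsv',
#         output_filepath='data/interim/journals_clean.tsv')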
|
StarcoderdataPython
|
1753125
|
from ..biotools import windows_overlap
import itertools
import numpy as np
class MutationChoice:
"""Represent a segment of a sequence with several possible variants.
Parameters
----------
segment
A pair (start, end) indicating the range of nucleotides concerned. We
are applying Python range, so
variants
A set of sequence variants, at the given position
Examples
--------
>>> choice = MutationChoice((70, 73), {})
"""
__slots__ = ["segment", "start", "end", "variants", "is_any_nucleotide"]
def __init__(self, segment, variants, is_any_nucleotide=False):
if isinstance(segment, int):
segment = (segment, segment + 1)
self.segment = segment
self.start, self.end = segment
self.variants = variants
self.is_any_nucleotide = is_any_nucleotide
# self.possible_subsequences = set(m.subsequence for m in mutations)
def random_variant(self, sequence):
"""Return one of the variants, randomly."""
subsequence = sequence[self.start : self.end]
variants = [v for v in self.variants if v != subsequence]
# the sorting of variants seems essential to ensure reproducibility
# between sessions.
# it does not slow down the global algorithm (or less than 3%)
variants = sorted(variants)
return variants[np.random.randint(len(variants))]
def merge_with(self, others):
"""Merge this mutation choice with others to form a single choice
Examples:
---------
>>> ((2, 5), {'ATT', 'ATA'})
merged with:
>>> [
>>> ((0, 3), {'GTA', 'GCT', 'GTT'}),
>>> ((3, 4), {'A', 'T', 'G', 'C'}),
>>> ((4, 7), {'ATG', 'ACC', 'CTG'})
>>> ]
returns the only choices on the full interval which are compatible with
at least one choice in each of the MutationChoices
>>> (0, 7), {'GTATACC', 'GTATATG'}
"""
others = sorted(others, key=lambda o: o.start)
others_start = others[0].start
final_segment = others_start, others[-1].end
final_variants = set()
for candidate in self.variants:
slots = []
for other in others:
istart, iend = windows_overlap(other.segment, self.segment)
slot = []
for variant in other.variants:
subseq = variant[istart - other.start : iend - other.start]
subcandidate = candidate[
istart - self.start : iend - self.start
]
if subseq == subcandidate:
slot.append(variant)
slots.append(slot)
for subseqs in itertools.product(*slots):
seq = "".join(subseqs)
matching_seq = seq[
self.start - others_start : self.end - others_start
]
if matching_seq == candidate:
final_variants.add(seq)
return MutationChoice(segment=final_segment, variants=final_variants)
def extract_varying_region(self):
"""Return MutationChoices for the central varying region and 2 flanks.
For instance:
>>> choice = MutationChoice((5, 12), [
>>> 'ATGCGTG',
>>> 'AAAAATG',
>>> 'AAATGTG',
>>> 'ATGAATG',
>>> ])
>>> choice.extract_varying_region()
Result :
>>> [
>>> MutChoice(5-6 A),
>>> MutChoice(6-10 TGCG-AATG-TGAA-AAAA),
>>> MutChoice(10-12 TG)
>>> ]
"""
if len(self.variants) <= 1:
return [self]
variants = list(self.variants)
reference = variants[0]
start = -1
end = len(reference)
for i in range(len(reference)):
for variant in variants[1:]:
if variant[i] != reference[i]:
if start == -1:
start = i
end = i + 1
break
result = []
if start > 0:
result.append(
MutationChoice(
(self.start, self.start + start), set([reference[:start]])
)
)
result.append(
MutationChoice(
(self.start + start, self.start + end),
set([v[start:end] for v in variants]),
)
)
if end < len(reference):
result.append(
MutationChoice(
(self.start + end, self.end),
set([v[end:] for v in variants]),
)
)
return result
def __repr__(self):
"""Represent."""
subsequences = "-".join(self.variants)
return "MutChoice(%d-%d %s)" % (self.start, self.end, subsequences)
def __str__(self):
"""Represent."""
subsequences = "-".join(self.variants)
return "MutChoice(%d-%d %s)" % (self.start, self.end, subsequences)
|
StarcoderdataPython
|
3367988
|
<filename>kete_hs21/lesson/apps.py
from django.apps import AppConfig
class LessonConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'lesson'
|
StarcoderdataPython
|
113830
|
#! /usr/bin/env python3
import os
import sys
import time
import notify2
from datetime import datetime
from pprint import pprint
from daterelate.daterelate import relate
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as conditions
from selenium.webdriver.support import ui
from configreader import ConfigReader
os.environ['DISPLAY'] = ':0' # Set the display if set to run as cronjob
HOMEPAGE = 'http://myaccount.telkom.co.ke'
TODAY = datetime.now()
config = ConfigReader('defaults.ini')
TITLE = config.get('notificationtitle', default='Telkom Balance')
NUMBER = config.get('number', section='credentials', default='')
PASSWD = config.get('pass', section='credentials', default='')
s = os.path.join(os.path.expanduser('~'), 'bin')
driver_path = config.get('driverspath', default=s)
chrome_driver_name = config.get('chromedrivername',
section='Chrome',
default='chromedriver')
firefox_driver_name = config.get('firefoxdrivername',
section='Firefox',
default='geckodriver')
headless = config.get('headless', default=True)
chrome_options = ChromeOptions()
firefox_options = FirefoxOptions()
if headless:
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
firefox_options.add_argument('--headless')
os.environ['MOZ_HEADLESS'] = '1'
browsers = {
'chrome': {
'filename': chrome_driver_name,
'class': webdriver.Chrome,
'kwargs': {
'chrome_options': chrome_options
}
},
'firefox': {
'filename': firefox_driver_name,
'class': webdriver.Firefox,
'kwargs': {
'firefox_options': firefox_options,
'log_path': os.path.join(
driver_path, 'geckodriver.log')
}
}
}
choice_browser = browsers[config.get('Browser', default='chrome')]
driver_full_path = os.path.join(
driver_path, choice_browser['filename'])
choice_browser['kwargs']['executable_path'] = driver_full_path
read = notify2.init(TITLE)
notifier = notify2.Notification(TITLE, 'Querying')
notifier.show()
def alert(message, title=TITLE):
notifier.update(title, message)
notifier.show()
kwargs = choice_browser.get('kwargs', {})
def login(browser):
"""Log in to the website using the provided credentials"""
number_input = browser.find_element_by_xpath(
"//div[@class='login_form']/div[@id='divInputNumber']/input")
pwd_input = browser.find_element_by_xpath(
"//div[@class='login_form']/div[@id='divInputPwd']/input")
login_btn = browser.find_element_by_id('userLoginBtn')
number_input.clear()
number_input.send_keys(NUMBER)
pwd_input.clear()
    pwd_input.send_keys(PASSWD)
login_btn.click()
def query():
"""Load selenium and scrape"""
browser = choice_browser.get(
'class', webdriver.Chrome)(**kwargs)
browser.get(HOMEPAGE)
try:
# Wait until logout button is visible
ui.WebDriverWait(browser, 3).until(
conditions.visibility_of_element_located(
(By.ID, 'userLogoutBtn')))
# browser.find_element_by_id('userLogoutBtn').click()
except TimeoutException:
# Login if logout button not visible
login(browser)
try:
# Locate details table
ui.WebDriverWait(browser, 15).until(
conditions.visibility_of_element_located(
(By.CLASS_NAME, 'table_main')))
except TimeoutException:
browser.close()
sys.exit(1)
else:
table = browser.find_element_by_class_name('table_main')
rows = table.find_elements_by_tag_name('tr')
messages = []
for row in rows[1:6]:
message = ''
tds = row.find_elements_by_tag_name('td')
for td in tds:
try:
input = td.find_element_by_tag_name('input')
except NoSuchElementException:
text = td.text.strip()
message += '{}'.format(text)
else:
text = input.get_attribute('value')
message += ': {}\n'.format(text)
try:
date = datetime.strptime(text, '%d-%m-%Y')
except ValueError:
pass
else:
message += relate(date, TODAY, future='to expiry', past='ago')
messages.append(message)
browser.close()
pprint(messages)
for message in messages:
# Display notification
alert(message)
time.sleep(3)
if __name__ == "__main__":
query()
|
StarcoderdataPython
|
3284255
|
# Copyright (c) 2020, <NAME>, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of <NAME>, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME>, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Test Linear Policy with RBF Features for the WAM ball-in-the-cup task.
"""
import torch as to
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # This import registers the 3D projection, but is otherwise unused.
import pyrado
from pyrado.environments.mujoco.wam_bic import WAMBallInCupSim
from pyrado.policies.special.dual_rfb import DualRBFLinearPolicy
from pyrado.utils.argparser import get_argparser
from pyrado.utils.data_types import RenderMode
from pyrado.policies.features import RBFFeat
from pyrado.sampling.rollout import rollout, after_rollout_query
from pyrado.utils.input_output import print_cbt
def compute_trajectory(weights, time, width):
centers = np.linspace(0, 1, weights.shape[0]).reshape(1, -1) # RBF center locations
diffs = time - centers
# Features
w = np.exp(-(diffs ** 2) / (2 * width))
wd = -(diffs / width) * w
w_sum = np.sum(w, axis=1, keepdims=True)
wd_sum = np.sum(wd, axis=1, keepdims=True)
# Normalized features
pos_features = w / w_sum
vel_features = (wd * w_sum - w * wd_sum) / w_sum ** 2
# Trajectory
q = pos_features @ weights
qd = vel_features @ weights
# Check gradient computation with finite difference approximation
for i in range(q.shape[1]):
qd_approx = np.gradient(q[:, i], 1 / len(time))
assert np.allclose(qd_approx, qd[:, i], rtol=1e-3, atol=1e-3)
return q, qd
def compute_trajectory_pyrado(weights, time, width):
weights = to.from_numpy(weights).to(dtype=to.get_default_dtype())
time = to.tensor(time, requires_grad=True, dtype=to.get_default_dtype())
rbf = RBFFeat(num_feat_per_dim=weights.shape[0], bounds=(np.array([0.0]), np.array([1.0])), scale=1 / (2 * width))
pos_feat = rbf(time)
q = pos_feat @ weights
# Explicit
vel_feat_E = rbf.derivative(time)
qd_E = vel_feat_E @ weights
# Autograd
q_1, q_2, q3 = q.t()
q_1.backward(to.ones((1750,)), retain_graph=True)
q_1d = time.grad.clone()
time.grad.fill_(0.0)
q_2.backward(to.ones((1750,)), retain_graph=True)
q_2d = time.grad.clone()
time.grad.fill_(0.0)
q3.backward(to.ones((1750,)))
q3d = time.grad.clone()
qd = to.cat([q_1d, q_2d, q3d], dim=1)
# Check similarity
assert to.norm(qd_E - qd) < 1e-3 # used to be 1e-6 with double precision
return q, qd
def check_feat_equality():
weights = np.random.normal(0, 1, (5, 3))
time = np.linspace(0, 1, 1750).reshape(-1, 1)
width = 0.0035
q_1, qd_1 = compute_trajectory_pyrado(weights, time, width)
q_2, qd_2 = compute_trajectory(weights, time, width)
assert q_1.size() == q_2.shape
assert qd_1.size() == qd_2.shape
is_q_equal = np.allclose(q_1.detach().cpu().numpy(), q_2, atol=1e-6)
is_qd_equal = np.allclose(qd_1.detach().cpu().numpy(), qd_2, atol=1e-5)
correct = is_q_equal and is_qd_equal
if not correct:
_, axs = plt.subplots(2)
axs[0].set_title("Joint Positions: pyrado and reference")
axs[0].plot(q_1.detach().cpu().numpy(), ls="--", label="pyrado")
axs[0].set_prop_cycle(None)
axs[0].plot(q_2, ls="-.", label="reference")
axs[0].legend()
axs[1].set_title("velocities - solid: pyrado, dashed: reference, dotted: finite difference")
axs[1].plot(qd_1.detach().cpu().numpy(), ls="--", label="pyrado")
axs[1].set_prop_cycle(None)
axs[1].plot(qd_2, ls="-.", label="reference")
axs[1].legend()
        if is_q_equal:  # q_1 and q_2 are the same
finite_diff = np.diff(np.concatenate([np.zeros((1, 3)), q_2], axis=0) * 500.0, axis=0) # init with 0, 500Hz
axs[1].plot(finite_diff, c="k", ls=":")
plt.show()
return correct
def eval_damping():
""" Plot joint trajectories for different joint damping parameters """
# Environment
env = WAMBallInCupSim(num_dof=7, max_steps=1500)
# Policy (random init)
policy_hparam = dict(num_feat_per_dim=12, bounds=(np.array([0.0]), np.array([1.0])))
policy = DualRBFLinearPolicy(env.spec, policy_hparam, dim_mask=2)
    # Do the rollouts
t_all = []
qpos_all = []
dp_vals = [0.0, 0.01, 0.1, 0.5, 1.0]
print_cbt(f"Run policy for damping coefficients: {dp_vals}")
for dpv in dp_vals:
env.reset(
domain_param=dict(
joint_1_damping=dpv,
joint_2_damping=dpv,
joint_3_damping=dpv,
joint_4_damping=dpv,
joint_5_damping=dpv,
joint_6_damping=dpv,
joint_7_damping=dpv,
)
)
ro = rollout(env, policy, render_mode=RenderMode(video=False), eval=True)
t_all.append(ro.time[:-1])
qpos_all.append(ro.env_infos["qpos"])
# Plot
fig, ax = plt.subplots(nrows=env.num_dof, sharex="all", figsize=(16, 7))
for i, idx_joint in enumerate([dof for dof in range(env.num_dof)]):
ax[i].set_prop_cycle(color=plt.get_cmap("cividis")(np.linspace(0, 1, env.num_dof)))
ax[i].set_ylabel(f"joint {idx_joint+1} pos [rad]")
for j in range(len(dp_vals)):
ax[i].plot(t_all[j], qpos_all[j][:, idx_joint], ls="--", label=f"d = {dp_vals[j]}")
if i == 0:
ax[i].legend(ncol=len(dp_vals))
ax[-1].set_xlabel("time [s]")
plt.suptitle("Evaluation of joint damping coefficients")
plt.show()
def eval_dryfriction():
""" Plot joint trajectories for different joint stiction parameters """
# Environment
env = WAMBallInCupSim(num_dof=7, max_steps=1500)
# Policy (random init)
policy_hparam = dict(num_feat_per_dim=12, bounds=(np.array([0.0]), np.array([1.0])))
policy = DualRBFLinearPolicy(env.spec, policy_hparam, dim_mask=2)
    # Do the rollouts
t_all = []
qpos_all = []
dp_vals = [0.0, 0.3, 0.6, 0.9, 1.2]
print_cbt(f"Run policy for stiction coefficients: {dp_vals}")
for dpv in dp_vals:
env.reset(
domain_param=dict(
joint_1_dryfriction=dpv,
joint_2_dryfriction=dpv,
joint_3_dryfriction=dpv,
joint_4_dryfriction=dpv,
joint_5_dryfriction=dpv,
joint_6_dryfriction=dpv,
joint_7_dryfriction=dpv,
)
)
ro = rollout(env, policy, render_mode=RenderMode(video=False), eval=True)
t_all.append(ro.time[:-1])
qpos_all.append(ro.env_infos["qpos"])
# Plot
fig, ax = plt.subplots(nrows=env.num_dof, sharex="all", figsize=(16, 7))
for i, idx_joint in enumerate([dof for dof in range(env.num_dof)]):
ax[i].set_prop_cycle(color=plt.get_cmap("cividis")(np.linspace(0, 1, env.num_dof)))
ax[i].set_ylabel(f"joint {idx_joint+1} pos [rad]")
for j in range(len(dp_vals)):
ax[i].plot(t_all[j], qpos_all[j][:, idx_joint], ls="--", label=f"s = {dp_vals[j]}")
if i == 0:
ax[i].legend(ncol=len(dp_vals))
ax[-1].set_xlabel("time [s]")
plt.suptitle("Evaluation of joint stiction coefficients")
plt.show()
def rollout_dummy_rbf_policy_7dof():
# Environment
env = WAMBallInCupSim(num_dof=7, max_steps=1750, task_args=dict(sparse_rew_fcn=True))
# Stabilize around initial position
env.reset(domain_param=dict(cup_scale=1.0, rope_length=0.3103, ball_mass=0.021))
act = np.zeros((6,)) # desired deltas from the initial pose
for i in range(500):
env.step(act)
env.render(mode=RenderMode(video=True))
# Apply DualRBFLinearPolicy
policy_hparam = dict(num_feat_per_dim=7, bounds=(np.array([0.0]), np.array([1.0])))
policy = DualRBFLinearPolicy(env.spec, policy_hparam, dim_mask=1)
done, param = False, None
while not done:
ro = rollout(env, policy, render_mode=RenderMode(video=True), eval=True, reset_kwargs=dict(domain_param=param))
print_cbt(f"Return: {ro.undiscounted_return()}", "g", bright=True)
done, _, param = after_rollout_query(env, policy, ro)
# Retrieve infos from rollout
t = ro.time
des_pos_traj = ro.env_infos["qpos_des"]
pos_traj = ro.env_infos["qpos"]
des_vel_traj = ro.env_infos["qvel_des"]
vel_traj = ro.env_infos["qvel"]
ball_pos = ro.env_infos["ball_pos"]
cup_pos = ro.env_infos["cup_pos"]
# Plot trajectories of the directly controlled joints and their corresponding desired trajectories
fig, ax = plt.subplots(3, sharex="all")
for i, idx in enumerate([1, 3, 5]):
ax[i].plot(t, des_pos_traj[:, idx], label=f"qpos_des {idx}")
ax[i].plot(t, pos_traj[:, idx], label=f"qpos {idx}")
ax[i].legend()
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.plot(xs=ball_pos[:, 0], ys=ball_pos[:, 1], zs=ball_pos[:, 2], color="blue", label="Ball")
ax.scatter(xs=ball_pos[-1, 0], ys=ball_pos[-1, 1], zs=ball_pos[-1, 2], color="blue", label="Ball final")
ax.plot(xs=cup_pos[:, 0], ys=cup_pos[:, 1], zs=cup_pos[:, 2], color="red", label="Cup")
ax.scatter(xs=cup_pos[-1, 0], ys=cup_pos[-1, 1], zs=cup_pos[-1, 2], color="red", label="Cup final")
ax.legend()
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.view_init(elev=16.0, azim=-7.0)
plt.show()
def rollout_dummy_rbf_policy_4dof():
# Environment
env = WAMBallInCupSim(
num_dof=4,
max_steps=3000,
# Note, when tuning the task args: the `R` matrices are now 4x4 for the 4 dof WAM
task_args=dict(R=np.zeros((4, 4)), R_dev=np.diag([0.2, 0.2, 1e-2, 1e-2])),
)
# Stabilize ball and print out the stable state
env.reset()
act = np.zeros(env.spec.act_space.flat_dim)
for i in range(1500):
env.step(act)
env.render(mode=RenderMode(video=True))
    # Printing out actual positions for 4-dof (just needed to set up the hard-coded values in the class)
print("Ball pos:", env.sim.data.get_body_xpos("ball"))
print("Cup goal:", env.sim.data.get_site_xpos("cup_goal"))
print("Cup bottom:", env.sim.data.get_site_xpos("cup_bottom"))
print("Joint pos (incl. first rope angle):", env.sim.data.qpos[:5])
# Apply DualRBFLinearPolicy and plot the joint states over the desired ones
rbf_hparam = dict(num_feat_per_dim=7, bounds=(np.array([0.0]), np.array([1.0])))
policy = DualRBFLinearPolicy(env.spec, rbf_hparam, dim_mask=2)
done, param = False, None
while not done:
ro = rollout(env, policy, render_mode=RenderMode(video=True), eval=True, reset_kwargs=dict(domain_param=param))
print_cbt(f"Return: {ro.undiscounted_return()}", "g", bright=True)
done, _, param = after_rollout_query(env, policy, ro)
if __name__ == "__main__":
# Parse command line arguments
args = get_argparser().parse_args()
# Set the seed
pyrado.set_seed(0)
# Check for function equality
if check_feat_equality():
print_cbt("The two methods to compute the trajectory yield equal results.", "g")
else:
print_cbt("The two methods to compute the trajectory do not yield equal results.", "r")
if args.mode.lower() == "damping":
eval_damping()
elif args.mode.lower() == "stiction":
eval_dryfriction()
elif args.mode.lower() == "7dof":
rollout_dummy_rbf_policy_7dof()
elif args.mode.lower() == "4dof":
rollout_dummy_rbf_policy_4dof()
else:
raise pyrado.ValueErr(given=args.mode, eq_constraint="damping, stiction, 7dof, or 4dof")
|
StarcoderdataPython
|
4824888
|
<reponame>lukepfister/scico
import operator as op
import numpy as np
from jax.config import config
import pytest
# enable 64-bit mode for output dtype checks
config.update("jax_enable_x64", True)
from typing import Optional
import jax
import scico.numpy as snp
from scico import linop
from scico.random import randn
from scico.typing import PRNGKey
def adjoint_AtA_test(A: linop.LinearOperator, key: Optional[PRNGKey] = None, rtol: float = 1e-4):
"""Check the validity of A.conj().T as the adjoint for a LinearOperator A
Compares the quantity sum(x.conj() * A.conj().T @ A @ x) against
norm(A @ x)**2. If the adjoint is correct, these quantities should be equal.
Args:
A : LinearOperator to test
key: PRNGKey for generating `x`.
rtol: Relative tolerance
"""
# Generate a signal in the domain of A
x, key = randn(A.input_shape, dtype=A.input_dtype, key=key)
Ax = A @ x
AtAx = A.conj().T @ Ax
num = snp.sum(x.conj() * AtAx)
den = snp.linalg.norm(Ax) ** 2
np.testing.assert_allclose(num / den, 1, rtol=rtol)
AtAx = A.H @ Ax
num = snp.sum(x.conj() * AtAx)
den = snp.linalg.norm(Ax) ** 2
np.testing.assert_allclose(num / den, 1, rtol=rtol)
AtAx = A.adj(Ax)
num = snp.sum(x.conj() * AtAx)
den = snp.linalg.norm(Ax) ** 2
np.testing.assert_allclose(num / den, 1, rtol=rtol)
def adjoint_AAt_test(A: linop.LinearOperator, key: Optional[PRNGKey] = None, rtol: float = 1e-4):
"""Check the validity of A as the adjoint for a LinearOperator A.conj().T
Compares the quantity sum(y.conj() * A @ A.conj().T @ y) against
norm(A.conj().T @ y)**2. If the adjoint is correct, these quantities should be equal.
Args:
A : LinearOperator to test
key: PRNGKey for generating `x`.
rtol: Relative tolerance
"""
# Generate a signal in the domain of A^T
y, key = randn(A.output_shape, dtype=A.output_dtype, key=key)
Aty = A.conj().T @ y
AAty = A @ Aty
num = snp.sum(y.conj() * AAty)
den = snp.linalg.norm(Aty) ** 2
np.testing.assert_allclose(num / den, 1, rtol=rtol)
Aty = A.H @ y
AAty = A @ Aty
num = snp.sum(y.conj() * AAty)
den = snp.linalg.norm(Aty) ** 2
np.testing.assert_allclose(num / den, 1, rtol=rtol)
Aty = A.adj(y)
AAty = A @ Aty
num = snp.sum(y.conj() * AAty)
den = snp.linalg.norm(Aty) ** 2
np.testing.assert_allclose(num / den, 1, rtol=rtol)
class AbsMatOp(linop.LinearOperator):
"""Simple LinearOperator subclass for testing purposes.
Similar to linop.MatrixOperator, but does not use the specialized MatrixOperator methods (.T, adj, etc).
Used to verify the LinearOperator interface.
"""
def __init__(self, A, adj_fn=None):
self.A = A
super().__init__(
input_shape=A.shape[1], output_shape=A.shape[0], input_dtype=A.dtype, adj_fn=adj_fn
)
def _eval(self, x):
return self.A @ x
class LinearOperatorTestObj:
def __init__(self, dtype):
M, N = (32, 64)
key = jax.random.PRNGKey(12345)
self.dtype = dtype
self.A, key = randn((M, N), dtype=dtype, key=key)
self.B, key = randn((M, N), dtype=dtype, key=key)
self.C, key = randn((N, M), dtype=dtype, key=key)
self.D, key = randn((M, N - 1), dtype=dtype, key=key)
self.x, key = randn((N,), dtype=dtype, key=key)
self.y, key = randn((M,), dtype=dtype, key=key)
scalar, key = randn((1,), dtype=dtype, key=key)
self.scalar = scalar.copy().ravel()[0]
self.Ao = AbsMatOp(self.A)
self.Bo = AbsMatOp(self.B)
self.Co = AbsMatOp(self.C)
self.Do = AbsMatOp(self.D)
@pytest.fixture(scope="module", params=[np.float32, np.float64, np.complex64, np.complex128])
def testobj(request):
yield LinearOperatorTestObj(request.param)
@pytest.mark.parametrize("operator", [op.add, op.sub])
def test_binary_op(testobj, operator):
# Our AbsMatOp class does not override the __add__, etc
# so AbsMatOp + AbsMatOp -> LinearOperator
# So to verify results, we evaluate the new LinearOperator on a random input
comp_mat = operator(testobj.A, testobj.B) # composite matrix
comp_op = operator(testobj.Ao, testobj.Bo) # composite linop
assert isinstance(comp_op, linop.LinearOperator) # Ensure we don't get a Map
assert comp_op.input_dtype == testobj.A.dtype
np.testing.assert_allclose(comp_mat @ testobj.x, comp_op @ testobj.x, rtol=5e-5)
# linops of different sizes
with pytest.raises(ValueError):
operator(testobj.Ao, testobj.Co)
with pytest.raises(ValueError):
operator(testobj.Ao, testobj.Do)
@pytest.mark.parametrize("operator", [op.mul, op.truediv])
def test_scalar_left(testobj, operator):
comp_mat = operator(testobj.A, testobj.scalar)
comp_op = operator(testobj.Ao, testobj.scalar)
assert isinstance(comp_op, linop.LinearOperator) # Ensure we don't get a Map
assert comp_op.input_dtype == testobj.A.dtype
np.testing.assert_allclose(comp_mat @ testobj.x, comp_op @ testobj.x, rtol=5e-5)
np.testing.assert_allclose(comp_mat.conj().T @ testobj.y, comp_op.adj(testobj.y), rtol=5e-5)
@pytest.mark.parametrize("operator", [op.mul, op.truediv])
def test_scalar_right(testobj, operator):
if operator == op.truediv:
pytest.xfail("scalar / LinearOperator is not supported")
comp_mat = operator(testobj.scalar, testobj.A)
comp_op = operator(testobj.scalar, testobj.Ao)
assert comp_op.input_dtype == testobj.A.dtype
np.testing.assert_allclose(comp_mat @ testobj.x, comp_op @ testobj.x, rtol=5e-5)
def test_negation(testobj):
comp_mat = -testobj.A
comp_op = -testobj.Ao
assert comp_op.input_dtype == testobj.A.dtype
np.testing.assert_allclose(comp_mat @ testobj.x, comp_op @ testobj.x, rtol=5e-5)
@pytest.mark.parametrize("operator", [op.add, op.sub])
def test_invalid_add_sub_array(testobj, operator):
# Try to add or subtract an ndarray with AbsMatOp
with pytest.raises(TypeError):
operator(testobj.A, testobj.Ao)
@pytest.mark.parametrize("operator", [op.add, op.sub])
def test_invalid_add_sub_scalar(testobj, operator):
# Try to add or subtract a scalar with AbsMatOp
with pytest.raises(TypeError):
operator(1.0, testobj.Ao)
def test_matmul_left(testobj):
comp_mat = testobj.A @ testobj.C
comp_op = testobj.Ao @ testobj.Co
assert comp_op.input_dtype == testobj.A.dtype
np.testing.assert_allclose(comp_mat @ testobj.y, comp_op @ testobj.y, rtol=5e-5)
def test_matmul_right(testobj):
comp_mat = testobj.C @ testobj.A
comp_op = testobj.Co @ testobj.Ao
assert comp_op.input_dtype == testobj.A.dtype
np.testing.assert_allclose(comp_mat @ testobj.x, comp_op @ testobj.x, rtol=5e-5)
def test_matvec_left(testobj):
comp_mat = testobj.A @ testobj.x
comp_op = testobj.Ao @ testobj.x
assert comp_op.dtype == testobj.A.dtype
np.testing.assert_allclose(comp_mat, comp_op, rtol=5e-5)
def test_matvec_right(testobj):
comp_mat = testobj.C @ testobj.y
comp_op = testobj.Co @ testobj.y
assert comp_op.dtype == testobj.A.dtype
np.testing.assert_allclose(comp_mat, comp_op, rtol=5e-5)
def test_gram(testobj):
Ao = testobj.Ao
a = Ao.gram(testobj.x)
b = Ao.conj().T @ Ao @ testobj.x
c = Ao.gram_op @ testobj.x
comp_mat = testobj.A.conj().T @ testobj.A @ testobj.x
np.testing.assert_allclose(a, comp_mat, rtol=5e-5)
np.testing.assert_allclose(b, comp_mat, rtol=5e-5)
np.testing.assert_allclose(c, comp_mat, rtol=5e-5)
def test_matvec_call(testobj):
# A @ x and A(x) should return same
np.testing.assert_allclose(testobj.Ao @ testobj.x, testobj.Ao(testobj.x), rtol=5e-5)
def test_adj_composition(testobj):
Ao = testobj.Ao
Bo = testobj.Bo
A = testobj.A
B = testobj.B
x = testobj.x
comp_mat = A.conj().T @ B
a = Ao.conj().T @ Bo
b = Ao.adj(Bo)
assert a.input_dtype == testobj.A.dtype
assert b.input_dtype == testobj.A.dtype
np.testing.assert_allclose(comp_mat @ x, a @ x, rtol=5e-5)
np.testing.assert_allclose(comp_mat @ x, b @ x, rtol=5e-5)
def test_transpose_matvec(testobj):
Ao = testobj.Ao
y = testobj.y
a = Ao.T @ y
b = y.T @ Ao
comp_mat = testobj.A.T @ y
assert a.dtype == testobj.A.dtype
assert b.dtype == testobj.A.dtype
np.testing.assert_allclose(a, comp_mat, rtol=5e-5)
np.testing.assert_allclose(a, b, rtol=5e-5)
def test_transpose_matmul(testobj):
Ao = testobj.Ao
Bo = testobj.Bo
x = testobj.x
comp_op = Ao.T @ Bo
comp_mat = testobj.A.T @ testobj.B
assert comp_op.input_dtype == testobj.A.dtype
np.testing.assert_allclose(comp_mat @ x, comp_op @ x, rtol=5e-5)
def test_conj_transpose_matmul(testobj):
Ao = testobj.Ao
Bo = testobj.Bo
x = testobj.x
comp_op = Ao.conj().T @ Bo
comp_mat = testobj.A.conj().T @ testobj.B
    assert comp_op.input_dtype == testobj.A.dtype
np.testing.assert_allclose(comp_mat @ x, comp_op @ x, rtol=5e-5)
def test_conj_matvec(testobj):
Ao = testobj.Ao
x = testobj.x
a = Ao.conj() @ x
comp_mat = testobj.A.conj() @ x
assert a.dtype == testobj.A.dtype
np.testing.assert_allclose(a, comp_mat, rtol=5e-5)
def test_adjoint_matvec(testobj):
Ao = testobj.Ao
y = testobj.y
a = Ao.adj(y)
b = Ao.conj().T @ y
c = (y.conj().T @ Ao).conj()
comp_mat = testobj.A.conj().T @ y
assert a.dtype == testobj.A.dtype
assert b.dtype == testobj.A.dtype
assert c.dtype == testobj.A.dtype
np.testing.assert_allclose(a, comp_mat, rtol=5e-5)
np.testing.assert_allclose(a, b, rtol=5e-5)
np.testing.assert_allclose(a, c, rtol=5e-5)
def test_adjoint_matmul(testobj):
# shape mismatch
Ao = testobj.Ao
Co = testobj.Co
with pytest.raises(ValueError):
Ao.adj(Co)
def test_hermitian(testobj):
Ao = testobj.Ao
y = testobj.y
np.testing.assert_allclose(Ao.conj().T @ y, Ao.H @ y)
def test_shape(testobj):
Ao = testobj.Ao
x = testobj.x
y = testobj.y
with pytest.raises(ValueError):
Ao @ y
with pytest.raises(ValueError):
Ao(y)
with pytest.raises(ValueError):
Ao.T @ x
with pytest.raises(ValueError):
Ao.adj(x)
class TestDiagonal:
def setup_method(self, method):
self.key = jax.random.PRNGKey(12345)
input_shapes = [(32,), (32, 48), ((3,), (4, 5))]
@pytest.mark.parametrize("diagonal_dtype", [np.float32, np.complex64])
@pytest.mark.parametrize("input_shape", input_shapes)
def test_eval(self, input_shape, diagonal_dtype):
diagonal, key = randn(input_shape, dtype=diagonal_dtype, key=self.key)
x, key = randn(input_shape, dtype=diagonal_dtype, key=key)
D = linop.Diagonal(diagonal=diagonal)
assert (D @ x).shape == D.output_shape
np.testing.assert_allclose((diagonal * x).ravel(), (D @ x).ravel(), rtol=1e-5)
@pytest.mark.parametrize("diagonal_dtype", [np.float32, np.complex64])
@pytest.mark.parametrize("input_shape", input_shapes)
def test_adjoint(self, input_shape, diagonal_dtype):
diagonal, key = randn(input_shape, dtype=diagonal_dtype, key=self.key)
D = linop.Diagonal(diagonal=diagonal)
adjoint_AtA_test(D)
adjoint_AAt_test(D)
@pytest.mark.parametrize("operator", [op.add, op.sub])
@pytest.mark.parametrize("diagonal_dtype", [np.float32, np.complex64])
@pytest.mark.parametrize("input_shape1", input_shapes)
@pytest.mark.parametrize("input_shape2", input_shapes)
def test_binary_op(self, input_shape1, input_shape2, diagonal_dtype, operator):
diagonal1, key = randn(input_shape1, dtype=diagonal_dtype, key=self.key)
diagonal2, key = randn(input_shape2, dtype=diagonal_dtype, key=key)
x, key = randn(input_shape1, dtype=diagonal_dtype, key=key)
D1 = linop.Diagonal(diagonal=diagonal1)
D2 = linop.Diagonal(diagonal=diagonal2)
if input_shape1 != input_shape2:
with pytest.raises(ValueError):
a = operator(D1, D2) @ x
else:
a = operator(D1, D2) @ x
Dnew = linop.Diagonal(operator(diagonal1, diagonal2))
b = Dnew @ x
np.testing.assert_allclose(a.ravel(), b.ravel(), rtol=1e-5)
@pytest.mark.parametrize("operator", [op.add, op.sub])
def test_binary_op_mismatch(self, operator):
diagonal_dtype = np.float32
input_shape1 = (32,)
input_shape2 = (48,)
diagonal1, key = randn(input_shape1, dtype=diagonal_dtype, key=self.key)
diagonal2, key = randn(input_shape2, dtype=diagonal_dtype, key=key)
D1 = linop.Diagonal(diagonal=diagonal1)
D2 = linop.Diagonal(diagonal=diagonal2)
with pytest.raises(ValueError):
operator(D1, D2)
@pytest.mark.parametrize("operator", [op.mul, op.truediv])
def test_scalar_right(self, operator):
if operator == op.truediv:
pytest.xfail("scalar / LinearOperator is not supported")
diagonal_dtype = np.float32
input_shape = (32,)
diagonal1, key = randn(input_shape, dtype=diagonal_dtype, key=self.key)
scalar = np.random.randn()
x, key = randn(input_shape, dtype=diagonal_dtype, key=key)
D = linop.Diagonal(diagonal=diagonal1)
scaled_D = operator(scalar, D)
np.testing.assert_allclose(scaled_D @ x, operator(scalar, D @ x), rtol=5e-5)
@pytest.mark.parametrize("operator", [op.mul, op.truediv])
def test_scalar_left(self, operator):
diagonal_dtype = np.float32
input_shape = (32,)
diagonal1, key = randn(input_shape, dtype=diagonal_dtype, key=self.key)
scalar = np.random.randn()
x, key = randn(input_shape, dtype=diagonal_dtype, key=key)
D = linop.Diagonal(diagonal=diagonal1)
scaled_D = operator(D, scalar)
np.testing.assert_allclose(scaled_D @ x, operator(D @ x, scalar), rtol=5e-5)
def test_adj_lazy():
dtype = np.float32
M, N = (32, 64)
A, key = randn((M, N), dtype=np.float32, key=None)
y, key = randn((M,), dtype=np.float32, key=key)
Ao = AbsMatOp(A, adj_fn=None) # defer setting the linop
assert Ao._adj is None
a = Ao.adj(y) # Adjoint is set when .adj() is called
b = A.T @ y
np.testing.assert_allclose(a, b, rtol=1e-5)
def test_jit_adj_lazy():
dtype = np.float32
M, N = (32, 64)
A, key = randn((M, N), dtype=np.float32, key=None)
y, key = randn((M,), dtype=np.float32, key=key)
Ao = AbsMatOp(A, adj_fn=None) # defer setting the linop
assert Ao._adj is None
Ao.jit() # Adjoint set here
assert Ao._adj is not None
a = Ao.adj(y)
b = A.T @ y
np.testing.assert_allclose(a, b, rtol=1e-5)
class PowerIterTestObj:
def __init__(self, dtype):
M, N = (8, 8)
key = jax.random.PRNGKey(12345)
self.dtype = dtype
A, key = randn((M, N), dtype=dtype, key=key)
self.A = A.conj().T @ A # ensure symmetric
self.Ao = linop.MatrixOperator(self.A)
self.Bo = AbsMatOp(self.A)
self.key = key
self.ev = snp.linalg.norm(
self.A, 2
) # The largest eigenvalue of A is the spectral norm of A
@pytest.fixture(scope="module", params=[np.float32, np.complex64])
def pitestobj(request):
yield PowerIterTestObj(request.param)
def test_power_iteration(pitestobj):
"""Verify that power iteration calculates largest eigenvalue for real and complex
symmetric matrices.
"""
# Test using the LinearOperator MatrixOperator
mu, v = linop.power_iteration(A=pitestobj.Ao, maxiter=100, key=pitestobj.key)
assert np.abs(mu - pitestobj.ev) < 1e-4
# Test using the AbsMatOp for test_linop.py
mu, v = linop.power_iteration(A=pitestobj.Bo, maxiter=100, key=pitestobj.key)
assert np.abs(mu - pitestobj.ev) < 1e-4
class SumTestObj:
def __init__(self, dtype):
self.x, key = randn((4, 5, 6, 7), dtype=dtype)
@pytest.fixture(scope="module", params=[np.float32, np.complex64])
def sumtestobj(request):
yield SumTestObj(request.param)
sum_axis = [
None,
0,
1,
2,
3,
(0, 1),
(0, 2),
(0, 3),
(1, 2),
(1, 3),
(2, 3),
(0, 1, 2),
(0, 1, 3),
(1, 2, 3),
(0, 1, 2, 3),
]
@pytest.mark.parametrize("axis", sum_axis)
def test_sum_eval(sumtestobj, axis):
x = sumtestobj.x
A = linop.Sum(input_shape=x.shape, input_dtype=x.dtype, sum_axis=axis)
np.testing.assert_allclose(A @ x, snp.sum(x, axis=axis), rtol=1e-3)
@pytest.mark.parametrize("axis", sum_axis)
def test_sum_adj(sumtestobj, axis):
x = sumtestobj.x
A = linop.Sum(input_shape=x.shape, input_dtype=x.dtype, sum_axis=axis)
adjoint_AtA_test(A)
adjoint_AAt_test(A)
@pytest.mark.parametrize("axis", (5, (1, 1), (0, 1, 2, 3, 4)))
def test_sum_bad_shapes(sumtestobj, axis):
# integer too high, repeated values, list too long
x = sumtestobj.x
with pytest.raises(ValueError):
A = linop.Sum(input_shape=x.shape, input_dtype=x.dtype, sum_axis=axis)
|
StarcoderdataPython
|
3399890
|
import time
import pytest
from he.decorators import (
timer,
debug,
throttle,
singleton,
repeat,
count_calls,
CountCalls,
)
# GIVEN any decorator
@pytest.mark.parametrize(
'decorator',
[timer, debug, throttle(rate=0.01), singleton, repeat, count_calls, CountCalls],
)
def test_consistency(decorator):
# WHEN decorating a function
@decorator
def nothing(arg, *, kwarg=5):
"""nothing's docstring"""
return arg + kwarg
# THEN the function's identity is preserved
assert 'nothing' == nothing.__name__
assert "nothing's docstring" == nothing.__doc__
# THEN the function's return value is returned, and its arguments are preserved
assert 1337 == nothing(1269, kwarg=68)
def test_timer(caplog):
caplog.set_level(0)
@timer
    def sleep_tenth_of_a_second():
        time.sleep(0.1)
    sleep_tenth_of_a_second()
assert caplog.record_tuples
logged_message = caplog.messages[0]
time_str = logged_message.split()[1]
time_rounded = round(float(time_str), 1)
assert 0.1 == time_rounded
def test_debug(caplog):
caplog.set_level(0)
@debug
def add(val1, val2=855):
return val1 + val2
add(482)
assert caplog.messages[0] == ' called with args (482)'
assert caplog.messages[1] == ' returned 1337'
# Test missing: decorator `throttle` without argument; not doing it because 1 second would be too long for a test.
def test_throttle():
# GIVEN a function that shouldn't get called more often than once every 0.1 seconds
@throttle(rate=0.1)
def nothing():
pass
# WHEN it gets called two times
start = time.time()
nothing()
nothing()
end = time.time()
# THEN around 0.2 seconds have passed
execution_time = end - start
assert 0.2 == round(execution_time, 1)
def test_singleton():
# GIVEN a class that shouldn't be instantiated more than once
@singleton
class Single:
def __init__(self, value):
self.value_initializable_once = value
# WHEN the user tries to instantiate it more than once
class_1 = Single('initialization text')
class_2 = Single("different text that shouldn't get used")
# THEN all instances actually refer to the same initial instance
assert class_1 is class_2
assert 'initialization text' == class_2.value_initializable_once
def test_repeat():
# GIVEN a function that should run three times for each call directive
@repeat
def nothing_three_times():
nothing_three_times.counter += 1
nothing_three_times.counter = 0
# WHEN it got called once
nothing_three_times()
# THEN it actually ran three times
assert 3 == nothing_three_times.counter
# And now the same test with an argument on the decorator...
@repeat(num_times=4)
def nothing_four_times():
nothing_four_times.counter += 1
nothing_four_times.counter = 0
nothing_four_times()
assert 4 == nothing_four_times.counter
def test_count_calls(caplog):
caplog.set_level(level=0)
@count_calls
def nothing():
pass
for _ in range(5):
nothing()
assert 5 == len(caplog.record_tuples)
assert '5 times' in caplog.record_tuples[4][2]
assert 5 == nothing.num_calls
def test_CountCalls(caplog):
caplog.set_level(level=0)
@CountCalls
def nothing():
pass
for _ in range(5):
nothing()
assert 5 == len(caplog.record_tuples)
assert '5 times' in caplog.record_tuples[4][2]
assert 5 == nothing.num_calls
|
StarcoderdataPython
|
72923
|
<reponame>code-impactor/arque<filename>main.py<gh_stars>1-10
import signal
import random
import logging
import asyncio
import aioredis
import time
from functools import wraps
from arque import Arque
logger = logging.getLogger(__name__)
async def shutdown(signal, loop):
"""Cleanup tasks tied to the service's shutdown."""
logging.info(f"Received exit signal {signal.name}...")
tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
[task.cancel() for task in tasks]
logging.info(f"Cancelling {len(tasks)} outstanding tasks")
    await asyncio.gather(*tasks, return_exceptions=True)
logging.info(f"Flushing metrics")
loop.stop()
def aioredis_pool(host='redis://localhost', encoding='utf8'):
def wrapper(func):
@wraps(func)
async def wrapped():
redis = await aioredis.create_redis_pool(host, encoding=encoding)
try:
return await func(redis=redis)
finally:
redis.close()
await redis.wait_closed()
return wrapped
return wrapper
@aioredis_pool(host='redis://localhost', encoding='utf8')
async def produce_task(redis=None):
logger.info('Starting producing...')
queue = Arque(redis=redis)
while True:
for _ in range(1):
task = {'value': random.randint(0, 99)}
task_id = f"custom_{task['value']}_{time.time()}"
logger.debug('Produced task %s', task)
await queue.enqueue(task, task_id=task_id, task_timeout=10, delay=1)
await asyncio.sleep(1)
async def process(task_data):
logger.debug('Consumed task %s', task_data)
await asyncio.sleep(1)
@aioredis_pool(host='redis://localhost', encoding='utf8')
async def consume_task(redis=None):
logger.info('Starting consuming...')
queue = Arque(redis=redis, working_limit=3)
while True:
task_id, task_data = await queue.dequeue()
if task_id == '__not_found__':
continue
if task_id == '__overloaded__':
print(f'TASK ID: {task_id}')
await asyncio.sleep(1)
continue
if task_id == '__marked_as_failed___':
print(f'FAILED ID: {task_id}')
continue
try:
await process(task_data)
await queue.release(task_id)
except Exception:
logger.exception('Job processing has failed')
await queue.requeue(task_id, delay=5)
stats = await queue.get_stats()
logger.info(stats)
@aioredis_pool(host='redis://localhost', encoding='utf8')
async def sweep_task(redis=None):
logger.info('Starting sweeping...')
queue = Arque(redis=redis, sweep_interval=5)
await queue.schedule_sweep()
@aioredis_pool(host='redis://localhost', encoding='utf8')
async def stats_task(redis=None):
logger.info('Starting stats...')
queue = Arque(redis=redis)
while True:
stats = await queue.get_stats()
logger.info(stats)
await asyncio.sleep(5)
async def example():
tasks = []
for _ in range(5):
tasks.append(consume_task())
tasks.append(produce_task())
tasks.append(sweep_task())
tasks.append(stats_task())
await asyncio.gather(*tasks)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
loop = asyncio.get_event_loop()
signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT, signal.SIGUSR1)
for s in signals:
loop.add_signal_handler(s, lambda s=s: asyncio.create_task(shutdown(s, loop)))
try:
loop.run_until_complete(example())
finally:
loop.close()
logging.info("Successfully shutdown...")
|
StarcoderdataPython
|
3213842
|
<reponame>ffreemt/gpt3-api
"""Test gpt3_api."""
from gpt3_api import __version__
from gpt3_api import gpt3_api
def test_version():
"""Test version."""
assert __version__ == "0.1.0"
def test_sanity():
"""Sanity check."""
try:
assert not gpt3_api()
except Exception:
assert True
|
StarcoderdataPython
|
3369621
|
<filename>backend/api/login.py
from typing import List
from datetime import timedelta
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from backend import schemas, crud, config
from backend.utils import get_db, get_current_user, create_access_token
from backend.models import User as DBUser
router = APIRouter()
@router.post("/login", response_model=schemas.Token, tags=["login"])
def login_access_token(
form_data: schemas.Login, db: Session = Depends(get_db)
):
"""
OAuth2 compatible token login, get an access token for future requests
"""
user = crud.user.authenticate(
db, email=form_data.username, password=form_data.password
)
if not user:
raise HTTPException(status_code=400, detail="Incorrect email or password")
elif not crud.user.is_active(user):
raise HTTPException(status_code=400, detail="Inactive user")
access_token_expires = timedelta(minutes=config.ACCESS_TOKEN_EXPIRE_MINUTES)
return {
"access_token": create_access_token(
data={"user_id": user.id}, expires_delta=access_token_expires
),
"token_type": "bearer",
}
@router.post("/user/info", tags=["login"], response_model=schemas.User)
def get_info(current_user: DBUser = Depends(get_current_user)):
"""
Test access token
"""
return current_user
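# Hedged usage sketch (not part of the original file): once this router is
# mounted on a FastAPI app, the token flow can be exercised roughly as below.
# The base URL, credentials and bearer-token header are assumptions; the field
# names follow the schemas referenced above.
#
#     import requests
#
#     resp = requests.post("http://localhost:8000/login",
#                          json={"username": "user@example.com", "password": "secret"})
#     token = resp.json()["access_token"]
#     info = requests.post("http://localhost:8000/user/info",
#                          headers={"Authorization": f"Bearer {token}"})
#     print(info.json())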
|
StarcoderdataPython
|
4827137
|
<gh_stars>10-100
"""
Install the following dependencies:
html-sanitizer==1.9.1
bleach==3.2.1
lxml==4.6.2
html5lib==1.1
"""
from bleach.sanitizer import Cleaner as BleachSanitizer
from html_sanitizer import Sanitizer as HTMLSanitizerSanitizer
from lxml.html import html5parser, tostring
from lxml.html.clean import Cleaner
from pybluemonday import UGCPolicy as BlueMondaySanitizer
# Snipppet from https://lxml.de/lxmlhtml.html#cleaning-up-html
TEST = """
<html>
<head>
<script type="text/javascript" src="evil-site"></script>
<link rel="alternate" type="text/rss" src="evil-rss">
<style>
body {background-image: url(javascript:do_evil)};
div {color: expression(evil)};
</style>
</head>
<body onload="evil_function()">
<!-- I am interpreted for EVIL! -->
<a href="javascript:evil_function()">a link</a>
<a href="#" onclick="evil_function()">another link</a>
<p onclick="evil_function()">a paragraph</p>
<div style="display: none">secret EVIL!</div>
<object> of EVIL! </object>
<iframe src="evil-site"></iframe>
<form action="evil-site">
Password: <input type="password" name="password">
</form>
<blink>annoying EVIL!</blink>
<a href="evil-site">spam spam SPAM!</a>
<image src="evil!">
</body>
</html>"""
BLUE_MONDAY = BlueMondaySanitizer()
HTML_SANITIZER = HTMLSanitizerSanitizer()
BLEACH_SANITIZER = BleachSanitizer()
LXML_SANITIZER = Cleaner()
def test_bleach():
BLEACH_SANITIZER.clean(TEST)
def test_html_sanitizer():
HTML_SANITIZER.sanitize(TEST)
def test_lxml_sanitizer():
LXML_SANITIZER.clean_html(TEST)
def test_bluemonday():
BLUE_MONDAY.sanitize(TEST)
if __name__ == "__main__":
import timeit
x = [
"bleach",
"html_sanitizer",
"lxml Cleaner",
"pybluemonday",
]
y = [
timeit.timeit("test_bleach()", globals=locals(), number=20000),
timeit.timeit("test_html_sanitizer()", globals=locals(), number=20000),
timeit.timeit("test_lxml_sanitizer()", globals=locals(), number=20000),
timeit.timeit("test_bluemonday()", globals=locals(), number=20000,),
]
for name, result in list(zip(x, y)):
print(name, "(20000 sanitizations):", result)
import seaborn as sns
import matplotlib.pyplot as plt
chart = sns.barplot(x=x, y=y)
chart.set_title("Time Taken to Sanitize HTML (20000 iterations, lower is better)")
chart.set_xlabel("Library")
chart.set_ylabel("Time (seconds)")
for p in chart.patches:
chart.annotate(
f"{round(p.get_height(), 2)} s",
(p.get_x() + 0.4, p.get_height()),
ha="center",
va="bottom",
color="black",
)
plt.show()
|
StarcoderdataPython
|
3339730
|
from os.path import join
from django.conf import settings
from django.forms import TextInput, Textarea
from django.utils.safestring import mark_safe
from django.template.loader import render_to_string
from cms.settings import CMS_MEDIA_URL
from cms.models import Page
from django.forms.widgets import Widget
class PluginEditor(Widget):
def __init__(self, attrs=None, installed=None, list=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
class Media:
js = [join(CMS_MEDIA_URL, path) for path in (
'javascript/jquery.js',
'javascript/plugin_editor.js',
'javascript/ui.core.js',
'javascript/ui.sortable.js',
)]
css = {
'all': [join(CMS_MEDIA_URL, path) for path in (
'css/plugin_editor.css',
)]
}
def render(self, name, value, attrs=None):
context = {
'plugin_list': self.attrs['list'],
'installed_plugins': self.attrs['installed']
}
return mark_safe(render_to_string(
'admin/cms/page/widgets/plugin_editor.html', context))
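# Hedged usage sketch (not from the original project): render() reads the
# plugin list and installed plugins out of `attrs`, so a form field could be
# wired up roughly like this (field name and values are illustrative assumptions):
#
#     from django import forms
#
#     class PageForm(forms.ModelForm):
#         plugins = forms.CharField(
#             required=False,
#             widget=PluginEditor(attrs={'list': [], 'installed': []}),
#         )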
|
StarcoderdataPython
|
31177
|
"""Module containing definitions of arithmetic functions used by perceptrons"""
from abc import ABC, abstractmethod
import numpy as np
from NaiveNeurals.utils import ErrorAlgorithm
class ActivationFunction(ABC):
"""Abstract function for defining functions"""
label = ''
@staticmethod
@abstractmethod
def function(arg: np.array) -> np.array:
"""Implementation of function
:param arg: float
:return: float
"""
raise NotImplementedError()
@classmethod
@abstractmethod
def prime(cls, arg: np.array) -> np.array:
"""First derivative of implemented function
:param arg: float
:return: float
"""
raise NotImplementedError()
class Sigmoid(ActivationFunction):
"""Represents sigmoid function and its derivative"""
label = 'sigmoid'
@staticmethod
def function(arg: np.array) -> np.array:
"""Calculate sigmoid(arg)
:param arg: float input value
:return: float sig(arg) value
"""
return 1 / (1 + np.exp(-arg))
@classmethod
def prime(cls, arg: np.array) -> np.array:
"""Calculate value of sigmoid's prime derivative for given arg
:param arg: float input value
:return: float value
"""
return cls.function(arg) * (1 - cls.function(arg))
class Tanh(ActivationFunction):
"""Represents hyperbolic tangent"""
label = 'tanh'
@staticmethod
def function(arg: np.array) -> np.array:
"""Calculate tanh(arg)
:param arg: float input value
:return: float tanh(arg) value
"""
return np.tanh(arg)
@classmethod
def prime(cls, arg: np.array) -> np.array:
"""Calculate value of tanh's prime derivative for given arg
:param arg: float input value
:return: float value
"""
return 1 - np.tanh(arg)**2
class Linear(ActivationFunction):
"""Represents linear function"""
label = 'lin'
@staticmethod
def function(arg: np.array) -> np.array:
"""Calculate lin(arg)
:param arg: float input value
:return: float lin(arg) value
"""
return arg
@classmethod
def prime(cls, arg: np.array) -> np.array:
"""Calculate value of lin's prime derivative for given arg
:param arg: float input value
:return: float value
"""
ones = np.array(arg)
ones[::] = 1.0
return ones
class SoftMax(ActivationFunction):
"""Represents SoftMax function
The ``softmax`` function takes an N-dimensional vector of arbitrary real values and produces
another N-dimensional vector with real values in the range (0, 1) that add up to 1.0.
source: https://eli.thegreenplace.net/2016/the-softmax-function-and-its-derivative/
"""
label = 'softmax'
@staticmethod
def function(arg: np.array, beta: int = 20) -> np.array: # pylint: disable=arguments-differ
"""Calculate softmax(arg)
:param arg: float input value
:param beta: scaling parameter
:return: float softmax(arg) value
"""
exps = np.exp(beta * arg - beta * arg.max())
return exps / np.sum(exps)
@classmethod
def prime(cls, arg: np.array) -> np.array:
"""Calculate value of softmax's prime derivative for given arg
:param arg: float input value
:return: float value
"""
return cls.function(arg) * (1 - cls.function(arg))
class SoftPlus(ActivationFunction):
"""Represents softplus function"""
label = 'softplus'
@staticmethod
def function(arg: np.array) -> np.array:
"""Calculate softplus(arg)
:param arg: float input value
:return: float softmax(arg) value
"""
return np.log(1 + np.exp(arg))
@classmethod
def prime(cls, arg: np.array) -> np.array:
"""Calculate value of softplus's prime derivative for given arg
:param arg: float input value
:return: float value
"""
return 1/(1 + np.exp(-arg))
def get_activation_function(label: str) -> ActivationFunction:
"""Get activation function by label
:param label: string denoting function
:return: callable function
"""
if label == 'lin':
return Linear()
if label == 'sigmoid':
return Sigmoid()
if label == 'tanh':
return Tanh()
return Sigmoid()
def calculate_error(target: np.array, actual: np.array,
func_type: ErrorAlgorithm = ErrorAlgorithm.SQR) -> np.array:
"""Calculates error for provided actual and targeted data.
:param target: target data
:param actual: actual training data
:param func_type: denotes type of used function for error
:return: calculated error
"""
if func_type == ErrorAlgorithm.SQR:
return np.sum(0.5 * np.power(actual - target, 2), axis=1)
elif func_type == ErrorAlgorithm.CE:
return -1 * np.sum(target * np.log(abs(actual)), axis=1)
raise NotImplementedError()
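# Hedged usage example (not part of the original module): evaluating an
# activation function and its derivative, then scoring a prediction with the
# squared-error measure defined above.
#
#     x = np.array([[-1.0, 0.0, 1.0]])
#     sigmoid = get_activation_function('sigmoid')
#     y = sigmoid.function(x)            # values in (0, 1)
#     dy = sigmoid.prime(x)              # y * (1 - y)
#     target = np.array([[0.0, 0.5, 1.0]])
#     err = calculate_error(target, y, ErrorAlgorithm.SQR)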
|
StarcoderdataPython
|
80518
|
"""This module handles all operations involving the user's settings."""
import json
from os import path
from PyQt5.QtWidgets import QWidget
from PyQt5.QtCore import QObject, pyqtSignal
from ui import SettingsTab
import ManageDB
from Constants import *
import GeneralUtils
from GeneralUtils import JsonModel
class Setting(Enum):
"""An enum of all settings"""
YEARLY_DIR = 0
OTHER_DIR = 1
REQUEST_INTERVAL = 2
REQUEST_TIMEOUT = 3
CONCURRENT_VENDORS = 4
CONCURRENT_REPORTS = 5
USER_AGENT = 6
class SettingsModel(JsonModel):
"""This holds the user's settings.
:param yearly_directory: The directory where yearly reports are saved. Yearly reports are reports that include all
the available data for a year.
:param other_directory: The default directory where non-yearly reports are saved.
:param request_interval: The time to wait between each report request, per vendor.
:param request_timeout: The time to wait before timing out a connection (seconds).
:param concurrent_vendors: The max number of vendors to work on at a time.
:param concurrent_reports: The max number of reports to work on at a time, per vendor.
:param user_agent: The user-agent that's included in the header when making requests.
"""
def __init__(self, show_debug_messages: bool, yearly_directory: str, other_directory: str, request_interval: int,
request_timeout: int, concurrent_vendors: int, concurrent_reports: int, user_agent: str,
default_currency: str):
self.show_debug_messages = show_debug_messages
self.yearly_directory = path.abspath(yearly_directory) + path.sep
self.other_directory = path.abspath(other_directory) + path.sep
self.request_interval = request_interval
self.request_timeout = request_timeout
self.concurrent_vendors = concurrent_vendors
self.concurrent_reports = concurrent_reports
self.user_agent = user_agent
self.default_currency = default_currency
@classmethod
def from_json(cls, json_dict: dict):
show_debug_messages = json_dict["show_debug_messages"]\
if "show_debug_messages" in json_dict else SHOW_DEBUG_MESSAGES
yearly_directory = json_dict["yearly_directory"]\
if "yearly_directory" in json_dict else YEARLY_DIR
other_directory = json_dict["other_directory"]\
if "other_directory" in json_dict else OTHER_DIR
request_interval = int(json_dict["request_interval"])\
if "request_interval" in json_dict else REQUEST_INTERVAL
request_timeout = int(json_dict["request_timeout"])\
if "request_timeout" in json_dict else REQUEST_TIMEOUT
concurrent_vendors = int(json_dict["concurrent_vendors"])\
if "concurrent_vendors" in json_dict else CONCURRENT_VENDORS
concurrent_reports = int(json_dict["concurrent_reports"])\
if "concurrent_reports" in json_dict else CONCURRENT_REPORTS
user_agent = json_dict["user_agent"]\
if "user_agent" in json_dict else USER_AGENT
default_currency = json_dict["default_currency"]\
if "default_currency" in json_dict else DEFAULT_CURRENCY
return cls(show_debug_messages, yearly_directory, other_directory, request_interval, request_timeout,
concurrent_vendors, concurrent_reports, user_agent, default_currency)
class SettingsController(QObject):
"""Controls the Settings tab
:param settings_widget: The settings widget.
:param settings_ui: The UI for settings_widget.
"""
settings_changed_signal = pyqtSignal(SettingsModel)
def __init__(self, settings_widget: QWidget, settings_ui: SettingsTab.Ui_settings_tab):
# region General
super().__init__()
self.settings_widget = settings_widget
json_string = GeneralUtils.read_json_file(SETTINGS_FILE_DIR + SETTINGS_FILE_NAME)
json_dict = json.loads(json_string)
self.settings = SettingsModel.from_json(json_dict)
self.show_debug_checkbox = settings_ui.show_debug_check_box
self.show_debug_checkbox.setChecked(self.settings.show_debug_messages)
# endregion
# region Reports
self.yearly_dir_edit = settings_ui.yearly_directory_edit
self.other_dir_edit = settings_ui.other_directory_edit
self.request_interval_spin_box = settings_ui.request_interval_spin_box
self.request_timeout_spin_box = settings_ui.request_timeout_spin_box
self.concurrent_vendors_spin_box = settings_ui.concurrent_vendors_spin_box
self.concurrent_reports_spin_box = settings_ui.concurrent_reports_spin_box
self.user_agent_edit = settings_ui.user_agent_edit
self.yearly_dir_edit.setText(self.settings.yearly_directory)
self.other_dir_edit.setText(self.settings.other_directory)
self.request_interval_spin_box.setValue(self.settings.request_interval)
self.request_timeout_spin_box.setValue(self.settings.request_timeout)
self.concurrent_vendors_spin_box.setValue(self.settings.concurrent_vendors)
self.concurrent_reports_spin_box.setValue(self.settings.concurrent_reports)
self.user_agent_edit.setText(self.settings.user_agent)
settings_ui.yearly_directory_button.clicked.connect(
lambda: self.on_directory_setting_clicked(Setting.YEARLY_DIR))
settings_ui.other_directory_button.clicked.connect(
lambda: self.on_directory_setting_clicked(Setting.OTHER_DIR))
# Reports Help Messages
settings_ui.yearly_directory_help_button.clicked.connect(
lambda: GeneralUtils.show_message("This is where the calendar-year reports will be saved"))
settings_ui.other_directory_help_button.clicked.connect(
lambda: GeneralUtils.show_message("This is where the special and non-calendar-year date range reports will "
"be saved by default"))
settings_ui.request_interval_help_button.clicked.connect(
lambda: GeneralUtils.show_message("The number of seconds the program will wait between sending each report "
"request to a given vendor"))
settings_ui.request_timeout_help_button.clicked.connect(
lambda: GeneralUtils.show_message("The number of seconds the program will allow a vendor to respond to "
"each report request before canceling it"))
settings_ui.concurrent_vendors_help_button.clicked.connect(
lambda: GeneralUtils.show_message("The maximum number of vendors to work on at the same time. "
"If set too high, the UI might freeze while fetching reports but the "
"fetch process will continue"))
settings_ui.concurrent_reports_help_button.clicked.connect(
lambda: GeneralUtils.show_message("The maximum number of reports to work on at the same time (per vendor). "
"If set too high, the UI might freeze while fetching reports but the "
"fetch process will continue"))
settings_ui.user_agent_help_button.clicked.connect(
lambda: GeneralUtils.show_message("How program identifies itself to the SUSHI servers. Some vendors will "
"reject some particular user agents. Only change this if there is a "
"known problem as it will affect all requests to all vendors. "
"See Help for more information."))
settings_ui.default_currency_help_button.clicked.connect(
lambda: GeneralUtils.show_message("The currency shown first in the Costs pulldown and also by Visual to "
"label the local currency in the spreadsheets generated with the Cost "
"Ratio option. Note: This doesn't have to be one of the pre-loaded "
"currencies."))
# endregion
# region Costs
self.default_currency_combobox = settings_ui.settings_costs_default_currency_combobox
self.default_currency_combobox.addItems(CURRENCY_LIST)
self.default_currency_combobox.setCurrentText(self.settings.default_currency)
# endregion
# region Search
# set up restore database button
self.is_rebuilding_database = False
self.update_database_dialog = ManageDB.UpdateDatabaseProgressDialogController(self.settings_widget)
self.rebuild_database_button = settings_ui.settings_rebuild_database_button
self.rebuild_database_button.clicked.connect(self.on_rebuild_database_clicked)
# endregion
settings_ui.save_button.clicked.connect(self.on_save_button_clicked)
def on_directory_setting_clicked(self, setting: Setting):
"""Handles the signal emitted when a choose folder button is clicked
:param setting: The setting to be changed
"""
dir_path = GeneralUtils.choose_directory()
if dir_path:
if setting == Setting.YEARLY_DIR:
self.yearly_dir_edit.setText(dir_path)
elif setting == Setting.OTHER_DIR:
self.other_dir_edit.setText(dir_path)
def on_save_button_clicked(self):
"""Handles the signal emitted when the save button is clicked"""
self.update_settings()
self.save_settings_to_disk()
self.settings_changed_signal.emit(self.settings)
GeneralUtils.show_message("Changes saved!")
def on_rebuild_database_clicked(self):
"""Restores the database when the restore database button is clicked"""
if not self.is_rebuilding_database: # check if already running
if GeneralUtils.ask_confirmation('Are you sure you want to rebuild the database?'):
self.is_rebuilding_database = True
self.update_database_dialog.update_database(ManageDB.get_all_report_files() +
ManageDB.get_all_cost_files(),
True)
self.is_rebuilding_database = False
else:
if self.settings.show_debug_messages: print('Database is already being rebuilt')
def update_settings(self):
"""Updates the app's settings using the values entered on the UI"""
self.settings.show_debug_messages = self.show_debug_checkbox.isChecked()
self.settings.yearly_directory = self.yearly_dir_edit.text()
self.settings.other_directory = self.other_dir_edit.text()
self.settings.request_interval = self.request_interval_spin_box.value()
self.settings.request_timeout = self.request_timeout_spin_box.value()
self.settings.concurrent_vendors = self.concurrent_vendors_spin_box.value()
self.settings.concurrent_reports = self.concurrent_reports_spin_box.value()
self.settings.user_agent = self.user_agent_edit.text()
self.settings.default_currency = self.default_currency_combobox.currentText()
def save_settings_to_disk(self):
"""Saves all settings to disk"""
json_string = json.dumps(self.settings, default=lambda o: o.__dict__)
GeneralUtils.save_json_file(SETTINGS_FILE_DIR, SETTINGS_FILE_NAME, json_string)
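# Hedged usage sketch (not part of the original module): SettingsModel.from_json
# tolerates partial dictionaries, falling back to the constants imported above
# for any missing keys.
#
#     partial = {"request_interval": "5", "user_agent": "Mozilla/5.0"}
#     settings = SettingsModel.from_json(partial)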
|
StarcoderdataPython
|
6345
|
# -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making QT4C available.
# Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
# QT4C is licensed under the BSD 3-Clause License, except for the third-party components listed below.
# A copy of the BSD 3-Clause License is included in this file.
#
'''Unit tests
'''
import unittest
import os
import sys
test_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(test_dir))
def main():
runner = unittest.TextTestRunner(verbosity=10 + sys.argv.count('-v'))
suite = unittest.TestLoader().discover(test_dir, pattern='test_*.py')
raise SystemExit(not runner.run(suite).wasSuccessful())
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3313069
|
# -*- coding: utf-8 -*-
"""window.py
A module implementing the Window class, which allows drawing and the saving of images and video.
License:
http://www.apache.org/licenses/LICENSE-2.0"""
import enum
import os
import pathlib
from typing import Any, Tuple
import cv2
import numpy as np
# noinspection PyUnresolvedReferences
class LineType(enum.Enum):
FILLED = cv2.FILLED
LINE_4 = cv2.LINE_4
LINE_8 = cv2.LINE_8
LINE_AA = cv2.LINE_AA
class Window(object):
"""A class for rendering and displaying primitives."""
def __init__(self, window_name: str, dim: Tuple[int, int],
color: Tuple[float, float, float] = (255.0, 255.0, 255.0), resizeable: bool = False,
image_dir: str = 'results_images', video_dir: str = 'results_videos') -> None:
"""Initializes the window. Does not create the actual cv2 window.
Note:
By default, the coordinate system for all draws will be such that the upper-left is (0, 0) and the bottom-
right is (dim[0], dim[1]). This can be altered using other methods after initialization.
Args:
window_name: The name of the window.
dim: The dimensions of the screen in pixels.
color: The bgr color of the window background.
resizeable: If the window should be resizeable.
image_dir: The directory to save images in.
video_dir: The directory to save videos in.
"""
if dim[0] < 1:
dim = (1, dim[1])
if dim[1] < 1:
dim = (dim[0], 1)
self.__dim = dim
self.__left = 0.0
self.__right = float(self.__dim[0])
self.__top = 0.0
self.__bottom = float(self.__dim[1])
self.__color = color
# noinspection PyUnresolvedReferences
self.__screen = np.zeros((self.__dim[1], self.__dim[0], 3), np.uint8)
self.__screen[:] = self.__color
self.__window_created = False
self.__window_name = window_name
self.__window_title = window_name
if resizeable:
# noinspection PyUnresolvedReferences
self.__window_flag = cv2.WINDOW_NORMAL
else:
# noinspection PyUnresolvedReferences
self.__window_flag = cv2.WINDOW_AUTOSIZE
self.__image_dir = image_dir
self.__video_dir = video_dir
self.__image_count = 0
pathlib.Path(self.__image_dir).mkdir(parents=False, exist_ok=True)
pathlib.Path(self.__video_dir).mkdir(parents=False, exist_ok=True)
self.flush_images()
def flush_images(self) -> None:
"""Deletes all images in the image directory."""
filenames = [filename for filename in os.listdir(self.__image_dir) if filename.endswith('.png')]
for filename in filenames:
os.remove(os.path.join(self.__image_dir, filename))
self.__image_count = 0
def flush(self) -> None:
"""Refills the screen with the background color."""
self.__screen[:] = self.__color
def set_title(self, window_title: str) -> None:
"""Sets the title of the window.
Args:
window_title: The new title of the window.
"""
self.__window_title = window_title
if self.__window_created:
# noinspection PyUnresolvedReferences
cv2.setWindowTitle(self.__window_name, self.__window_title)
def display(self, ms: int = 50) -> int:
"""Displays the window.
Args:
ms: The number of milliseconds to display. 0 means infinitely, until a key is pressed.
Returns:
The value of the key event (when ms is 0).
"""
# Create the window if it has not already been created.
if not self.__window_created:
# noinspection PyUnresolvedReferences
cv2.namedWindow(self.__window_name, self.__window_flag)
# noinspection PyUnresolvedReferences
cv2.setWindowProperty(self.__window_name, cv2.WND_PROP_ASPECT_RATIO, cv2.WINDOW_KEEPRATIO)
# noinspection PyUnresolvedReferences
cv2.setWindowTitle(self.__window_name, self.__window_title)
self.__window_created = True
# noinspection PyUnresolvedReferences
cv2.imshow(self.__window_name, self.__screen)
# noinspection PyUnresolvedReferences
return cv2.waitKey(ms)
def save_image(self) -> None:
"""Saves an image in the image_dir with the filename 'image-#.png' where # is the image's index."""
filename = '{}/image-{}.png'.format(self.__image_dir, str(self.__image_count).zfill(6))
print('TAKING SCREENSHOT {}'.format(filename))
# noinspection PyUnresolvedReferences
cv2.imwrite(filename, self.__screen)
self.__image_count += 1
def save_video(self, filename: str = 'video', fps: float = 20) -> None:
"""Saves a video from the images saved.
Args:
filename: The filename to save the video with. Note: should not include extension.
fps: The frames per second.
"""
filename = '{}.avi'.format(filename)
filename = os.path.join(self.__video_dir, filename)
image_names = [filename_ for filename_ in os.listdir(self.__image_dir) if filename_.endswith('.png')]
if len(image_names) < 1:
return
# noinspection PyUnresolvedReferences
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
# noinspection PyUnresolvedReferences
video = cv2.VideoWriter(filename, fourcc, fps, self.__dim)
for filename in image_names:
# noinspection PyUnresolvedReferences
video.write(cv2.imread(os.path.join(self.__image_dir, filename)))
def set_coordinate_system(self, left: float, right: float, top: float, bottom: float) -> None:
"""Sets the coordinate system, maintaining the aspect ratio.
Args:
left: The left most x-coordinate to fit on the screen.
right: The right most x-coordinate to fit on the screen.
top: The top most y-coordinate to fit onto the screen.
bottom: The bottom most y-coordinate to fit onto the screen.
"""
y_to_x_ratio_screen = self.__dim[1] / self.__dim[0]
x_span = abs(left - right)
y_span = abs(top - bottom)
y_to_x_ratio_world = y_span / x_span
if y_to_x_ratio_world <= y_to_x_ratio_screen:
# We can fit by padding on the y-axis.
y_center = (top + bottom) * 0.5
y_adjustment = y_to_x_ratio_screen / y_to_x_ratio_world
# Top.
top_off_center = top - y_center
top_new_off_center = top_off_center * y_adjustment
top = top_new_off_center + y_center
# Bottom.
bottom_off_center = bottom - y_center
bottom_new_off_center = bottom_off_center * y_adjustment
bottom = bottom_new_off_center + y_center
else:
# We can fit by padding on the x-axis.
x_to_y_ratio_screen = self.__dim[0] / self.__dim[1]
x_to_y_ratio_world = x_span / y_span
x_center = (left + right) * 0.5
x_adjustment = x_to_y_ratio_screen / x_to_y_ratio_world
# Left.
left_off_center = left - x_center
left_new_off_center = left_off_center * x_adjustment
left = left_new_off_center + x_center
# Right.
right_off_center = right - x_center
right_new_off_center = right_off_center * x_adjustment
right = right_new_off_center + x_center
self.set_coordinate_system_naive(left, right, top, bottom)
def set_coordinate_system_naive(self, left: float, right: float, top: float, bottom: float) -> None:
"""Sets the coordinate system naively. Naive in this sense means that aspect ratio is not maintained.
Example:
If the Window was initialized with screen dimensions (800, 600), and this method is called with parameters
0, 100, 0, 600, the window screen will remain (800, 600) pixels in dimensions but horizontal lines going
from left to right will be 100 units and 800 pixels long (a 1:8 ratio) while vertical lines going from top
to bottom will be 600 units and 600 pixels long (a 1:1 ratio).
Args:
left: The x-coordinate at the very left of the screen.
right: The x-coordinate at the very right of the screen.
top: The y-coordinate at the very top of the screen.
bottom: The y-coordinate at the very bottom of the screen.
"""
self.__left = left
self.__right = right
self.__top = top
self.__bottom = bottom
def __get_screen_point(self, p: Any) -> Tuple[int, int]:
"""Given a point in world coordinates, return the point in screen coordinates.
Note:
p must be of a type such that the x- and y-coordinates are accessible using the [] operator with keys 0 and
1 respectively.
Args:
p: A point in world coordinates.
Returns:
A point in screen coordinates.
"""
# x_distance is the percentage of the way that the point is from the left to the right.
# y_distance is the same except from the top to the bottom.
x_distance = (p[0] - self.__left) / (self.__right - self.__left)
y_distance = (p[1] - self.__top) / (self.__bottom - self.__top)
# x_value and y_values are the actual screen coordinates casted into ints so that the point lies directly on an
# actual pixel.
x_value = int(x_distance * self.__dim[0])
y_value = int(y_distance * self.__dim[1])
return x_value, y_value
def draw_line(self, pt1: Any, pt2: Any, color: Tuple[float, float, float], thickness: int = 1,
line_type: LineType = LineType.LINE_AA) -> None:
"""Draws a line.
Note:
Points pt1 and pt2 must be of a type such that the x- and y-coordinates are accessible using the [] operator
with keys 0 and 1 respectively.
Note:
A thickness > 1 is required for LineType.FILLED.
Args:
pt1: The source point of the line in world coordinates.
pt2: The destination point of the line in world coordinates.
color: The BGR color of the line.
thickness: The thickness of the line in pixels.
line_type: The type of line.
"""
pt1 = self.__get_screen_point(pt1)
pt2 = self.__get_screen_point(pt2)
# noinspection PyUnresolvedReferences
cv2.line(img=self.__screen, pt1=pt1, pt2=pt2, color=color, thickness=thickness, lineType=line_type.value)
def draw_circle(self, center: Any, radius: int, color: Tuple[float, float, float], thickness: int = 1,
line_type: LineType = LineType.LINE_AA) -> None:
"""Draws a circle.
Note:
Center must be of a type such that the x- and y-coordinates are accessible using the [] operator with keys 0
and 1 respectively.
Note:
A thickness > 1 is required for LineType.FILLED.
Args:
center: The center of the circle in world coordinates.
radius: The radius of the circle in pixels (NOT WORLD COORDINATES).
color: The BGR color of the circle.
thickness: The thickness of the circle in pixels.
line_type: The type of line.
"""
center = self.__get_screen_point(center)
# noinspection PyUnresolvedReferences
cv2.circle(img=self.__screen, center=center, radius=radius, color=color, thickness=thickness,
lineType=line_type.value)
def draw_cross(self, center: Any, color: Tuple[float, float, float], length: int, thickness: int = 1,
line_type: LineType = LineType.LINE_AA) -> None:
"""Draws a cross.
Note:
Center must be of a type such that the x- and y-coordinates are accessible using the [] operator with keys 0
and 1 respectively.
Note:
A thickness > 1 is required for LineType.FILLED.
Args:
center: The center of the cross in world coordinates.
color: The BGR color of the cross.
length: The length of the arms of the cross in pixels (NOT WORLD COORDINATES).
thickness: The thickness of the arms of the cross in pixels.
line_type: The type of line.
"""
center = self.__get_screen_point(center)
left = (center[0] - length, center[1])
right = (center[0] + length, center[1])
top = (center[0], center[1] - length)
bottom = (center[0], center[1] + length)
# noinspection PyUnresolvedReferences
cv2.line(img=self.__screen, pt1=left, pt2=right, color=color, thickness=thickness, lineType=line_type.value)
# noinspection PyUnresolvedReferences
cv2.line(img=self.__screen, pt1=top, pt2=bottom, color=color, thickness=thickness, lineType=line_type.value)
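# Hedged usage sketch (not part of the original module): create a window, draw
# a few primitives and display it. Colors are BGR tuples, as in the draw_* docs.
#
#     win = Window('demo', (800, 600))
#     win.set_coordinate_system(left=0.0, right=100.0, top=0.0, bottom=100.0)
#     win.draw_line((10, 10), (90, 90), color=(255, 0, 0), thickness=2)
#     win.draw_circle((50, 50), radius=20, color=(0, 0, 255), thickness=2)
#     win.draw_cross((50, 50), color=(0, 255, 0), length=10)
#     win.display(ms=0)   # 0 waits for a key press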
|
StarcoderdataPython
|
3238682
|
<reponame>aminhp93/learning_python
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import (
CreateView,
ListView,
DetailView,
DeleteView,
UpdateView
)
from .models import Comment
# Create your views here.
# class CommentListView(ListView):
# model = Comment
# class CommentCreateView(CreateView):
# model = Comment
# fields = ['content']
class CommentDetailView(DetailView):
model = Comment
pk_url_kwarg = 'id'
def get_context_data(self, **kwargs):
context = super(CommentDetailView, self).get_context_data(**kwargs)
print(context)
return context
# class CommentUpdateView(UpdateView):
# model = Comment
# fields = ['content']
# template_name_suffix = '_update_form'
# pk_url_kwarg = 'id'
class CommentDeleteView(DeleteView):
model = Comment
success_url = reverse_lazy("posts:list")
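# Hedged usage sketch (not part of the original app): URL wiring matching the
# views above. CommentDetailView uses pk_url_kwarg = 'id', while
# CommentDeleteView keeps Django's default 'pk'. Route names are assumptions.
#
#     # urls.py
#     from django.urls import path
#     from .views import CommentDetailView, CommentDeleteView
#
#     urlpatterns = [
#         path('comments/<int:id>/', CommentDetailView.as_view(), name='comment-detail'),
#         path('comments/<int:pk>/delete/', CommentDeleteView.as_view(), name='comment-delete'),
#     ]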
|
StarcoderdataPython
|
1790079
|
<gh_stars>1-10
#!/usr/bin/python2.7
import ConfigParser
import optparse
import os
import re
import sys
import time
import engine
import data
import genautodep
APPDIR_RE = re.compile(r"(/app)($|/)")
def RegisterJavaLibrary(module, f):
name = "lib%s" % f.name
lib = data.JavaLibrary(
module.name, f.path, name,
list(data.FixPath(module.name, f.path, ["%s.java" % f.name])),
[],
list(c.DepName() for c in f.classes),
[])
data.DataHolder.Register(module.name, f.path, name, lib)
#print "reg %s=%s:%s" % (module.name, f.path, name)
# Create a binary target that depends solely on the lib
binary = data.JavaBinary(
module.name, f.path, f.name,
"%s/%s" % (f.path, f.name),
["%s:%s" % (f.path, name)])
data.DataHolder.Register(module.name, f.path, f.name, binary)
# Create a jar target for the binary as well
jar = data.JavaJar(
module.name, f.path, f.name + "_deploy", binary.FullName())
data.DataHolder.Register(module.name, f.path, f.name + "_deploy", jar)
def main():
start_time = time.time()
parser = optparse.OptionParser()
parser.add_option("-v", "--verbose", action="store_true", dest="verbose")
(options, args) = parser.parse_args()
data.VERBOSE = options.verbose
config = ConfigParser.SafeConfigParser(allow_no_value=True)
# Module paths (the options of the modules section) must be case sensitive.
config.optionxform = str
config.read("icbm.cfg")
if config.has_section("modules"):
module_paths = [path for path, _ in config.items("modules")]
else:
module_paths = ["lib", "src"]
if config.has_option("java", "flags_by_default"):
data.JAVA_BINARY_FLAGS_DEFAULT = config.getboolean(
"java", "flags_by_default")
if config.has_option("proto", "protobuf_java"):
protobuf_java = config.get("proto", "protobuf_java")
else:
protobuf_java = "lib=:protobuf-java-2.5.0"
try:
os.mkdir(engine.BUILD_DIR)
except:
pass
modules = genautodep.ComputeDependencies(module_paths)
for module in modules.itervalues():
mname = module.name
app_dirs = {}
for package, farr in module.files.iteritems():
# Process non-protos before protos, in case there is
# already a checked-in version, so that they don't
# conflict.
filemap = {}
java_files = []
proto_files = []
for f in farr:
if mname == "src":
m = APPDIR_RE.search(f.path)
if m:
appdir = f.path[:m.end(1)]
app_dirs.setdefault(appdir, []).append(f)
# We want to allow tests to depend on these,
# so keep processing as usual. In the future,
# some sort of compromise will need to be
# made, as this can still lead to namespace collisions.
#continue
filemap.setdefault(f.path, []).append(f)
if isinstance(f, genautodep.ProtoFile):
proto_files.append(f)
else:
java_files.append(f)
for f in java_files:
RegisterJavaLibrary(module, f)
for f in proto_files:
# Skip protos if there's already a lib for that name
# that is out there.
if data.DataHolder.Get(mname, f.DepName()):
continue
RegisterJavaLibrary(module, f)
# Autodep doesn't find the dependency on protobufs.
data.DataHolder.Get(mname, f.DepName()).deps.append(
protobuf_java)
gen = data.Generate(
mname, f.path, f.name + "_proto",
"%s/genproto.sh" % engine.ICBM_PATH, None,
list(data.FixPath(mname, f.path, ["%s.proto" % f.protoname])) + f.extras,
[os.path.join(f.path, "%s.java" % f.name)])
data.DataHolder.Register(mname, f.path, f.name + "_proto", gen)
# Create a lib in each package as well
for path, file_arr in filemap.iteritems():
lib = data.JavaLibrary(
mname, path, "lib",
[],
[],
list(f.DepName() for f in file_arr),
[])
data.DataHolder.Register(mname, path, "lib", lib)
for path, file_arr in app_dirs.iteritems():
deps = set()
for f in file_arr:
for c in f.classes:
if not APPDIR_RE.search(c.path):
deps.add(c.DepName())
lib = data.JavaLibrary(
mname, path, "app_deps",
[],
[],
list(deps),
[])
data.DataHolder.Register(mname, path, "app_deps", lib)
for jar in module.jars:
lib = data.JavaLibrary(
mname, "", jar.name, [],
list(data.FixPath(mname, jar.path, ["%s.jar" % jar.name])),
[], [])
data.DataHolder.Register(mname, jar.path, jar.name, lib)
lib = data.JavaLibrary(
mname, "", "jars",
[],
[],
list(f.DepName() for f in module.jars),
[])
data.DataHolder.Register(mname, "", "jars", lib)
if module.jsps:
lib = data.JavaLibrary(
mname, "", "jsp_deps",
[],
[],
list(c.DepName() for jsp in module.jsps for c in jsp.classes),
[])
data.DataHolder.Register(mname, "", "jsp_deps", lib)
for target in args:
# load the corresponding spec files
data.LoadTargetSpec(data.TOPLEVEL, target)
for target in args:
d = data.DataHolder.Get(data.TOPLEVEL, target)
if not d:
print "Unknown target:", target
sys.exit(1)
d.LoadSpecs()
success = data.DataHolder.Go(args)
elapsed_time = time.time() - start_time
print
print "Total ICBM build time: %.1f seconds" % elapsed_time
if not success:
sys.exit(1)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1623992
|
from zabbix_enums.common import _ZabbixEnum
class MacroType(_ZabbixEnum):
TEXT = 0
SECRET = 1
|
StarcoderdataPython
|
1612520
|
<filename>apisummariser/preprocessing.py
import numpy as np
from sklearn import manifold
from tqdm import tqdm
from apisummariser.helper import sequences_metrics
class Preprocessor:
def __init__(self, callers_file, callers_package, callers, calls):
'''
:type callers: list
:param callers: a list of caller methods
:type calls: list of lists
:param calls: a list of method call sequences
'''
self.callers_file = callers_file
self.callers_package = callers_package
self.callers = callers
self.calls = calls
def perform_preprocessing(self, mode, params):
"""
Calls the appropriate functions to preprocess the data.
:type mode: string
:param mode: ['vector', 'distance']
:type params: dictionary
:param params: {'metric','remove_singletons','remove_pseudo_singletons','remove_unique'}
"""
params_clean = {'remove_singletons':params['remove_singletons'],
'remove_pseudo_singletons':params['remove_pseudo_singletons']}
self.clean_data(params_clean)
if mode == 'vector':
self.create_vector()
elif mode == 'distance':
dist_func = self.get_dist_func(params['metric'])
self.compute_similarity(dist_func)
if params['remove_unique']:
self.remove_outliers()
else:
raise NotImplementedError
def clean_data(self, params):
"""
Cleans the dataset, based on the specified option. Note that the 'remove_pseudo_singletons' option should be
combined with the 'remove_singletons' one.
:type params: dictionary
:param params: {'remove_singletons','remove_pseudo_singletons'}
"""
upd_callers_file = []
upd_callers_package = []
upd_callers = []
upd_calls = []
for i in range(len(self.calls)):
# remove duplicate callers and sequences with single/identical API calls (includes singleton sequences)
if self.callers[i] not in upd_callers:
if params['remove_singletons']:
if params['remove_pseudo_singletons']:
if self.calls[i].count(self.calls[i][0]) != len(self.calls[i]):
upd_callers_file.append(self.callers_file[i])
upd_callers_package.append(self.callers_package[i])
upd_callers.append(self.callers[i])
upd_calls.append(self.calls[i])
else:
if len(self.calls[i]) > 1:  # keep only sequences with more than one call
upd_callers_file.append(self.callers_file[i])
upd_callers_package.append(self.callers_package[i])
upd_callers.append(self.callers[i])
upd_calls.append(self.calls[i])
else:
upd_callers_file.append(self.callers_file[i])
upd_callers_package.append(self.callers_package[i])
upd_callers.append(self.callers[i])
upd_calls.append(self.calls[i])
self.callers_file = upd_callers_file
self.callers_package = upd_callers_package
self.callers = upd_callers
self.calls = upd_calls
def get_dist_func(self, metric):
"""
Gets an instance of the function that will be used for computing sequence similarity.
:type metric: string
:param metric: ['lcs', 'lcs-mod', 'lcs-min', 'lcs-ext', 'jaccard', 'jaccard-min', 'gestalt', 'seqsim',
'levenshtein']
"""
if metric == 'lcs':
dist_func = getattr(sequences_metrics, 'lcs')
elif metric == 'lcs-mod':
dist_func = getattr(sequences_metrics, 'lcs_mod')
elif metric == 'lcs-min':
dist_func = getattr(sequences_metrics, 'lcs_min')
elif metric == 'lcs-ext':
dist_func = getattr(sequences_metrics, 'lcs_ext')
elif metric == 'jaccard':
dist_func = getattr(sequences_metrics, 'jaccard')
elif metric == 'jaccard-min':
dist_func = getattr(sequences_metrics, 'jaccard_min')
elif metric == 'gestalt':
dist_func = getattr(sequences_metrics, 'gestalt')
elif metric == 'seqsim':
dist_func = getattr(sequences_metrics, 'seqsim')
elif metric == 'levenshtein':
dist_func = getattr(sequences_metrics, 'levenshtein')
else:
raise NotImplementedError
return dist_func
def compute_similarity(self, dist_func):
"""
Creates a distance matrix based on the computed similarities between sequences (API calls).
:type dist_func: function
:param dist_func: an instance of the distance function to be used
"""
self.dist_mat = np.zeros((len(self.calls), len(self.calls)))
for i in tqdm(range(len(self.calls))):
for j in range(i + 1):
self.dist_mat[i][j] = dist_func(self.calls[i], self.calls[j])
self.dist_mat[j][i] = self.dist_mat[i][j]
def remove_outliers(self):
"""
Currently removes sequences that are unique. It makes use of the distance matrix, for efficiency reasons.
This could be easily avoided by a brute-force solution.
"""
ind_to_remove = []
for i in range(len(self.callers)):
if np.count_nonzero(self.dist_mat[i] == 0.0) == 1:
ind_to_remove.append(i)
self.dist_mat = np.delete(self.dist_mat, ind_to_remove, axis=0)
self.dist_mat = np.delete(self.dist_mat, ind_to_remove, axis=1)
for i in reversed(ind_to_remove):
self.callers_file.pop(i)
self.callers_package.pop(i)
self.callers.pop(i)
self.calls.pop(i)
print 'Data points after removing outliers: ' + str(len(self.callers))
def dist_to_vec(self, params):
"""
Generates a feature vector given a distance matrix and based on the t-SNE algorithm.
:type params: dictionary
:param params: {'n_components','perplexity','random_state'}
"""
model = manifold.TSNE(n_components=params['n_components'], perplexity=params['perplexity'],
metric="precomputed", random_state=params['random_state'])
np.set_printoptions(suppress=True)
self.f_vector = model.fit_transform(self.dist_mat)
def create_vector(self):
"""
Generates feature vectors, using the API method calls as features. This does not take into account the order in
which the API methods are invoked.
"""
self.calls_set = set()
for calls in self.calls:
self.calls_set.update(calls)
self.calls_set = list(self.calls_set)
self.f_vector = np.zeros((len(self.callers), len(self.calls_set)))
print len(self.f_vector)
for caller_id in range(len(self.callers)):
for call_id in range(len(self.calls_set)):
if self.calls_set[int(call_id)] in self.calls[int(caller_id)]:
self.f_vector[int(caller_id)][int(call_id)] = 1
print 'Non-zero elements:' + str(np.count_nonzero(self.f_vector))
def freq_idx(self):
seen_el = []
seen_idx = []
for i in range(len(self.calls)):
for j in range(i + 1, len(self.calls)):
if self.calls[j] == self.calls[i] and self.calls[j] not in seen_el:
seen_el.append(self.calls[j])
seen_idx.append(j)
return seen_idx
def non_identical_seqs(self):
"""
Selects all the non_identical sequences.
"""
non_identical_calls = []
for call in self.calls:
if call not in non_identical_calls:
non_identical_calls.append(call)
return non_identical_calls
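# Hedged usage sketch (not part of the original module): wiring the preprocessor
# to a tiny, made-up data set. The parameter keys follow perform_preprocessing()
# above; the caller/call values are illustrative assumptions.
#
#     callers_file = ['A.java', 'B.java']
#     callers_package = ['pkg.a', 'pkg.b']
#     callers = ['A.run', 'B.run']
#     calls = [['open', 'read', 'close'], ['open', 'close']]
#     pre = Preprocessor(callers_file, callers_package, callers, calls)
#     pre.perform_preprocessing('distance', {'metric': 'lcs',
#                                            'remove_singletons': True,
#                                            'remove_pseudo_singletons': True,
#                                            'remove_unique': False})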
|
StarcoderdataPython
|
71211
|
<filename>tokenfile.py
token = "<PASSWORD>"
#Looks like MjM4NDk0NzU2NTIxMzc3Nzky.CunGFQ.wUILz7z6HoJzVeq6pyHPmVgQgV4
|
StarcoderdataPython
|
14248
|
import requests
from utils import loginFile, dataAnalysis
import os
import datetime
from dateutil.relativedelta import relativedelta
import json
from utils.logCls import Logger
dirpath = os.path.dirname(__file__)
cookieFile = f"{dirpath}/utils/cookies.txt"
dataFile = f"{dirpath}/datas"
class DevopsProject:
def __init__(self, logFileName):
# Initialize the search start and end dates
self.endDate = datetime.datetime.today().date()
self.startDate = self.endDate - relativedelta(months=+1)
# Logger
self.logger = Logger("[告警信息通报({}-{})]".format(self.startDate, self.endDate), logFileName)
def _load_cookies(self):
print("----------_load_cookies----------")
# Load cookies
if not os.path.exists(cookieFile):
return False
# 3. Check whether the cookies have expired
try:
with open(cookieFile, "r")as f:
cookies = f.read()
if self.login_check(cookies):
return cookies
else:
return
except Exception as e:
print(e.args)
os.remove(cookieFile)
self.logger.get_log().debug("[cookies过期]")
return False
def login_check(self, cookies):
# Check whether the cookie is still valid
self.logger.get_log().debug("[正在验证cookie]")
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh-TW;q=0.9,zh;q=0.8,en-US;q=0.7,en;q=0.6',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Cookie': cookies,
'Host': 'xt.devops123.net',
'Referer': 'http://xt.devops123.net/Welcome/login/',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'
}
checkUrl = "http://xt.devops123.net/portal/substation_list/991"
response = requests.get(checkUrl, headers=headers)
if response.status_code == 200:
if "管理面板" in response.text:
self.logger.get_log().debug("[加载cookie成功]")
return True
else:
self.logger.get_log().debug("[加载失败, 正在进行登录]")
return False
raise response.raise_for_status()
def login(self):
# Log in
cookies = self._load_cookies()
if cookies:
return cookies
cookies = loginFile.loginDevops().login()
return cookies
def getReportData(self, cookies):
self.logger.get_log().debug("[正在搜索告警信息]")
self.searchTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# Query the alarm information
downloadUrl = "http://xt.devops123.net/alarm?selCity=&selCounty=0&selSubstation=&selRoom=&level=1&selDevModel=&selStatus%5B%5D=unresolved&reportDate={}%E8%87%B3{}&selSignalName=&substationType%5B%5D=A%E7%BA%A7%E5%B1%80%E7%AB%99&substationType%5B%5D=B%E7%BA%A7%E5%B1%80%E7%AB%99&substationType%5B%5D=C%E7%BA%A7%E5%B1%80%E7%AB%99&substationType%5B%5D=D%E7%BA%A7%E5%B1%80%E7%AB%99&substationType%5B%5D=D1%E7%BA%A7%E5%B1%80%E7%AB%99&word=&export=exporttoexcel"
headers = {
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Referer': 'http://xt.devops123.net/alarm?level=1',
'Cookie': cookies,
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh-TW;q=0.9,zh;q=0.8,en-US;q=0.7,en;q=0.6'
}
response = requests.get(downloadUrl.format(str(self.startDate), str(self.endDate)), headers=headers)
return response.text
def getDingDingInfo(self, cityName):
# Load the DingTalk robot configuration
with open("utils/dingdingRobotInfo.json", "r", encoding="utf-8")as f:
robotInfo = json.loads(f.read())
if cityName in list(robotInfo.keys()):
SECRET = robotInfo.get(cityName)[0]
WEBHOOK = robotInfo.get(cityName)[1]
return SECRET, WEBHOOK
else:
self.logger.get_log().debug("[没有该{}对应的钉钉信息,请检查dingdingRobotInfo.json文件]".format(cityName))
return
def detail_data(self, dataList, monitorInfo, warn5=False, byhour=False):
if warn5:
for data in dataList:
k, group = data
SECRET, WEBHOOK = self.getDingDingInfo(k)
htmlPath = dataAnalysis.data2html(k, group, dataFile, k2="超过5天告警信息汇总")
imgFile = dataAnalysis.html2image(htmlPath)
imgUrl = dataAnalysis.img2url(imgFile)
sendTitle = f"{k}-{'超过5天告警信息汇总'}\n\n- 数据提取时间:{self.searchTime}\n- 上报时间段:\t{self.startDate}至{self.endDate} \n"
sendText = sendTitle + "\n".join(
[f"- {k}:\t{v}条" for k, v in group.groupby("信号名称")["信号名称"].count().sort_values(ascending=False).to_dict().items()])
yield k, SECRET, WEBHOOK, imgUrl, sendText
else:
for data in dataList:
k, group = data
if byhour:
group = group.loc[group["信号名称"].isin(monitorInfo)]
SECRET, WEBHOOK = self.getDingDingInfo(k)
htmlPath = dataAnalysis.data2html(k, group, dataFile)
imgFile = dataAnalysis.html2image(htmlPath)
imgUrl = dataAnalysis.img2url(imgFile)
sendText = "\n".join([f"- {k}:\t{v}条" for k, v in group.groupby("区域")["区域"].count().to_dict().items()])
yield k, SECRET, WEBHOOK, imgUrl, sendText
def reportTotal(self, totalInfo, monitorInfo):
self.logger.get_log().debug("正在汇总信息...")
cityNames = ["乌鲁木齐", "昌吉", "吐鲁番", "奎屯", "博州", "哈密", "塔城", "阿勒泰", "伊犁", "巴州",
"和田", "阿克苏", "石河子", "喀什", "克州", "克拉玛依"]
totalSendTextByCity = {}
summaryInfo = dataAnalysis.dataSummary(totalInfo)
for city in cityNames:
summaryText = "\n".join([f"- {k} : {v}条" for k, v in summaryInfo.get(city, {}).items() if k in monitorInfo])
if summaryText:
totalSendText = f"{self.startDate}至{self.endDate}\n- #告警消息汇总#\n- 数据提取时间:{self.searchTime}\n- #按照信号名称汇总如下#\n" + summaryText
else:
totalSendText = f"{self.startDate}至{self.endDate}\n- 数据提取时间:{self.searchTime}\n" + "无告警信息."
totalSendTextByCity[city] = totalSendText
return totalSendTextByCity
def monitorByHour(self):
try:
monitorInfo = ["通信状态", "烟感", "温度", "交流输入停电警告", "交流输入停电告警", "蓄电池组总电压过低", "水浸", "电池熔丝故障告警", "蓄电池总电压过高"]
self.logger.get_log().debug("[正在登录]")
new_cookie = self.login()
# Fetch the report as Excel XML
self.logger.get_log().debug("[进入【温度】【交流输入停电告警】【蓄电池组总电压过低】监控...(监控频率:每小时一次)]")
xmlData = self.getReportData(new_cookie)
# Parse the XML
if dataAnalysis.parseData(xmlData, dataFile):
totalInfo, warn5days, dataList = dataAnalysis.parseData(xmlData, dataFile, byhour=True)
totalSendTextByCity = self.reportTotal(totalInfo, monitorInfo)
self.logger.get_log().debug("[发送告警信息]")
for k, SECRET, WEBHOOK, imgUrl, sendText in self.detail_data(dataList, monitorInfo, byhour=True):
totalSendText = totalSendTextByCity.get(k)
if "无告警信息" in totalSendText:
dataAnalysis.sendMessage(SECRET, WEBHOOK, totalSendText, imgUrl="")
self.logger.get_log().debug(totalSendText)
else:
sendTextTotal = f"{totalSendText}\n{'- #按照县汇总如下#'}\n{sendText}"
dataAnalysis.sendMessage(SECRET, WEBHOOK, sendTextTotal, imgUrl)
self.logger.get_log().debug(sendTextTotal)
self.logger.get_log().debug("[告警信息发送结束]")
dataAnalysis.clearDir(dataFile)
except Exception as e:
self.logger.get_log().debug(e.args)
def monitorByDay(self):
try:
self.logger.get_log().debug("[进入【通信状态】【烟感】【水浸】【电池熔丝故障告警】【蓄电池总电压过高】【手动控制状态】【启动电池电压低】监控...(监控频率:每天一次)]")
monitorInfo = ["通信状态", "烟感", "水浸", "电池熔丝故障告警", "蓄电池总电压过高", "手动控制状态", "启动电池电压低", "交流输入停电警告", "交流输入停电告警", "温度",
"蓄电池组总电压过低"]
new_cookie = self.login()
# Fetch the report as Excel XML
xmlData = self.getReportData(new_cookie)
# Parse the XML
if dataAnalysis.parseData(xmlData, dataFile):
totalInfo, warn5days, dataList = dataAnalysis.parseData(xmlData, dataFile)
totalSendTextByCity = self.reportTotal(totalInfo, monitorInfo)
self.logger.get_log().debug("[汇总告警时间超过5天的信息]")
for k, SECRET, WEBHOOK, imgUrl, sendText in self.detail_data(warn5days, monitorInfo, warn5=True):
self.logger.get_log().debug(sendText)
dataAnalysis.sendMessage(SECRET, WEBHOOK, sendText, imgUrl)
self.logger.get_log().debug("[汇总告警信息]")
for k1, SECRET, WEBHOOK, imgUrl, sendText in self.detail_data(dataList, monitorInfo):
totalSendText = totalSendTextByCity.get(k1)
if "无告警信息" in totalSendText:
dataAnalysis.sendMessage(SECRET, WEBHOOK, totalSendText, imgUrl="")
self.logger.get_log().debug(totalSendText)
else:
sendTextTotal = f"{totalSendText}\n{'- #按照县汇总如下#'}\n{sendText}"
self.logger.get_log().debug(sendTextTotal)
dataAnalysis.sendMessage(SECRET, WEBHOOK, sendTextTotal, imgUrl)
self.logger.get_log().debug("告警信息发送结束")
except Exception as e:
self.logger.get_log().debug(e.args)
def main(self):
# Main entry point
self.monitorByDay()
# self.monitorByHour()
if __name__ == '__main__':
demo = DevopsProject("test")
demo.main()
|
StarcoderdataPython
|
1608004
|
<reponame>xuefeicao/snorkel
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
import re
import sys
import numpy as np
import scipy.sparse as sparse
class ProgressBar(object):
def __init__(self, N, length=40):
# Protect against division by zero (N = 0 results in full bar being printed)
self.N = max(1, N)
self.nf = float(self.N)
self.length = length
# Precalculate the i values that should trigger a write operation
self.ticks = set([round(i/100.0 * N) for i in range(101)])
self.ticks.add(N-1)
self.bar(0)
def bar(self, i):
"""Assumes i ranges through [0, N-1]"""
if i in self.ticks:
b = int(np.ceil(((i+1) / self.nf) * self.length))
sys.stdout.write(
"\r[{0}{1}] {2}%".format(
"="*b, " "*(self.length-b), int(100*((i+1) / self.nf))))
sys.stdout.flush()
def close(self):
# Move the bar to 100% before closing
self.bar(self.N-1)
sys.stdout.write("\n\n")
sys.stdout.flush()
def get_ORM_instance(ORM_class, session, instance):
"""
Given an ORM class and *either an instance of this class, or the name attribute of an instance
of this class*, return the instance
"""
if isinstance(instance, str):
return session.query(ORM_class).filter(ORM_class.name == instance).one()
else:
return instance
def camel_to_under(name):
"""
Converts camel-case string to lowercase string separated by underscores.
Written by epost
(http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case).
:param name: String to be converted
:return: new String with camel-case converted to lowercase, underscored
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def sparse_nonzero(X):
"""Sparse matrix with value 1 for i,jth entry !=0"""
X_nonzero = X.copy()
if not sparse.issparse(X):
X_nonzero[X_nonzero != 0] = 1
return X_nonzero
if sparse.isspmatrix_csr(X) or sparse.isspmatrix_csc(X):
X_nonzero.data[X_nonzero.data != 0] = 1
elif sparse.isspmatrix_lil(X):
X_nonzero.data = [np.ones(len(L)) for L in X_nonzero.data]
else:
raise ValueError("Only supports CSR/CSC and LIL matrices")
return X_nonzero
def sparse_abs(X):
"""Element-wise absolute value of sparse matrix- avoids casting to dense matrix!"""
X_abs = X.copy()
if not sparse.issparse(X):
return abs(X_abs)
if sparse.isspmatrix_csr(X) or sparse.isspmatrix_csc(X):
X_abs.data = np.abs(X_abs.data)
elif sparse.isspmatrix_lil(X):
X_abs.data = np.array([np.abs(L) for L in X_abs.data])
else:
raise ValueError("Only supports CSR/CSC and LIL matrices")
return X_abs
def matrix_coverage(L):
"""
Given an N x M matrix where L_{i,j} is the label given by the jth LF to the ith candidate:
Return the **fraction of candidates that each LF labels.**
"""
return np.ravel(sparse_nonzero(L).sum(axis=0) / float(L.shape[0]))
def matrix_overlaps(L):
"""
Given an N x M matrix where L_{i,j} is the label given by the jth LF to the ith candidate:
Return the **fraction of candidates that each LF _overlaps with other LFs on_.**
"""
L_nonzero = sparse_nonzero(L)
return np.ravel(np.where(L_nonzero.sum(axis=1) > 1, 1, 0).T * L_nonzero / float(L.shape[0]))
def matrix_conflicts(L):
"""
Given an N x M matrix where L_{i,j} is the label given by the jth LF to the ith candidate:
Return the **fraction of candidates that each LF _conflicts with other LFs on_.**
"""
B = L.copy()
if not sparse.issparse(B):
for row in range(B.shape[0]):
if np.unique(np.array(B[row][np.nonzero(B[row])])).size == 1:
B[row] = 0
return matrix_coverage(sparse_nonzero(B))
if not (sparse.isspmatrix_csc(B) or sparse.isspmatrix_lil(B) or sparse.isspmatrix_csr(B)):
raise ValueError("Only supports CSR/CSC and LIL matrices")
if sparse.isspmatrix_csc(B) or sparse.isspmatrix_lil(B):
B = B.tocsr()
for row in range(B.shape[0]):
if np.unique(B.getrow(row).data).size == 1:
B.data[B.indptr[row]:B.indptr[row+1]] = 0
return matrix_coverage(sparse_nonzero(B))
def matrix_tp(L, labels):
return np.ravel([
np.sum(np.ravel((L[:, j] == 1).todense()) * (labels == 1)) for j in range(L.shape[1])
])
def matrix_fp(L, labels):
return np.ravel([
np.sum(np.ravel((L[:, j] == 1).todense()) * (labels == -1)) for j in range(L.shape[1])
])
def matrix_tn(L, labels):
return np.ravel([
np.sum(np.ravel((L[:, j] == -1).todense()) * (labels == -1)) for j in range(L.shape[1])
])
def matrix_fn(L, labels):
return np.ravel([
np.sum(np.ravel((L[:, j] == -1).todense()) * (labels == 1)) for j in range(L.shape[1])
])
def get_as_dict(x):
"""Return an object as a dictionary of its attributes"""
if isinstance(x, dict):
return x
else:
try:
return x._asdict()
except AttributeError:
return x.__dict__
def sort_X_on_Y(X, Y):
return [x for (y,x) in sorted(zip(Y,X), key=lambda t : t[0])]
def corenlp_cleaner(words):
d = {'-RRB-': ')', '-LRB-': '(', '-RCB-': '}', '-LCB-': '{',
'-RSB-': ']', '-LSB-': '['}
return [d[w] if w in d else w for w in words]
def tokens_to_ngrams(tokens, n_max=3, delim=' '):
N = len(tokens)
for root in range(N):
for n in range(min(n_max, N - root)):
yield delim.join(tokens[root:root+n+1])
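# Hedged usage example (not part of the original module):
#
#     >>> list(tokens_to_ngrams(['the', 'quick', 'fox'], n_max=2))
#     ['the', 'the quick', 'quick', 'quick fox', 'fox']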
|
StarcoderdataPython
|
4841359
|
import skimage.io as io
import skimage.transform as skt
import numpy as np
from PIL import Image
from src.models.class_patcher import patcher
from src.utils.imgproc import *
from skimage.color import rgb2hsv, hsv2rgb, rgb2gray
from skimage.filters import gaussian
class patcher(patcher):
def __init__(self, body='./body/body_yuko.png', **options):
super().__init__('幽狐', body=body, pantie_position=[-2, 1130], **options)
self.mask = io.imread('./mask/mask_yuko.png')
self.ribbon_position = [1712, 1601]
self.bra_position = [298, 1301]
try:
self.use_ribbon_mesh = self.options['use_ribbon_mesh']
except:
self.use_ribbon_mesh = self.ask(question='Use Yuko ribbon mesh?', default=False)
if self.use_ribbon_mesh:
self.ribbon_base = io.imread('./mask/ribbon_yuko.png')[:, :, :3] / 255
self.ribbon_shade = io.imread('./material/ribbon_yuko.png')[:, :, 3] / 255
self.bra_base = io.imread('./mask/bra_yuko.png')[1300:, 300:-400] / 255
self.bra_mask = self.bra_base[:, :, 0] > 0
self.bra_center = io.imread('./mask/bra_yuko_center.png')[1300:, 300:-400, 0] > 0
self.bra_shade = io.imread('./material/bra_yuko_shade.png')[1300:, 300:-400, 3] / 255
self.frill = io.imread('./material/bra_yuko_frill.png')[1300:, 300:-400] / 255
self.lace = io.imread('./material/bra_yuko_lace.png')[1300:, 300:-400] / 255
self.ribbon_mask = io.imread('./mask/ribbon.png')
def gen_ribbon(self, image):
image = np.array(image)
ribbon = image[19:58, 5:35, :3]
base_color = np.mean(np.mean(ribbon[5:12, 16:20], axis=0), axis=0) / 255
shade_color = np.mean(np.mean(ribbon[8:14, 7:15], axis=0), axis=0) / 255
ribbon_base = io.imread('./mask/ribbon_yuko.png')[:, :, :3] / 255
ribbon_shade = io.imread('./material/ribbon_yuko.png')[:, :, 3] / 255
ribbon_base = (self.ribbon_base > 0) * base_color
ribbon_shade = self.ribbon_shade[:, :, None] * (1 - shade_color)
ribbon = ribbon_base - ribbon_shade
ribbon = np.dstack((ribbon, ribbon[:, :, 0] > 0))
ribbon = np.clip(ribbon, 0, 1)
return Image.fromarray(np.uint8(ribbon * 255))
def gen_bra(self, image):
# image = Image.open('./dream/0101.png')
pantie = np.array(image)
if self.use_ribbon_mesh:
pantie = ribbon_inpaint(pantie)
else:
ribbon = pantie.copy()
ribbon[:, :, 3] = self.ribbon_mask[:, :, 1]
ribbon = ribbon[19:58, 8:30] / 255.0
front = pantie[20:100, 30:80, :3] / 255
front_shade = pantie[100:150, 0:40, :3] / 255
center = pantie[20:170, -200:-15, :3] / 255
base_color = np.mean(np.mean(center, axis=0), axis=0)
front_color = np.mean(np.mean(front, axis=0), axis=0)
shade_color = np.mean(np.mean(front_shade, axis=0), axis=0)
# make seamless design
design = rgb2gray(center[:, :, :3])[::-1, ::-1]
design = (design - np.min(design)) / (np.max(design) - np.min(design))
edge = 3
design_seamless = gaussian(design, sigma=3)
design_seamless[edge:-edge, edge:-edge] = design[edge:-edge, edge:-edge]
[hr, hc, hd] = center.shape
y = np.arange(-hr / 2, hr / 2, dtype=np.int16)
x = np.arange(-hc / 2, hc / 2, dtype=np.int16)
design_seamless = (design_seamless[y, :])[:, x] # rearrange pixels
design_seamless = resize(design_seamless, [1.65, 1.8])
design_seamless = np.tile(design_seamless, (3, 4))
posy = int((self.bra_center.shape[0] - design_seamless.shape[0]) / 2)
posx = int((self.bra_center.shape[1] - design_seamless.shape[1]) / 2)
sx = 0
sy = 0
design_seamless = (np.pad(design_seamless, [(posy + sy + 1, posy - sy), (posx + sx, posx - sx)], mode='constant'))
# Base shading
bra_base = self.bra_base[:, :, :3] * front_color
bra_base = bra_base - design_seamless[:, :, None] / 10
shade = rgb2hsv(np.tile((self.bra_shade)[:, :, None], [1, 1, 3]) * base_color)
shade[:, :, 0] -= 1
shade[:, :, 1] *= 0.5 + np.mean(base_color) / 3
shade[:, :, 2] /= 1 + 1 * np.mean(base_color)
bra_shade = hsv2rgb(shade)
# bra_shade = bra_shade[:, :, None] * shade_color
# Center painting
sx = -270
sy = -50
center = resize(center, [4, 4])
posy = int((self.bra_center.shape[0] - center.shape[0]) / 2)
posx = int((self.bra_center.shape[1] - center.shape[1]) / 2)
center = (np.pad(center, [(posy + sy, posy - sy), (posx + sx, posx - sx), (0, 0)], mode='constant'))
center = center * self.bra_center[:, :, None]
# Decoration painting
deco_shade = np.median(pantie[5, :, :3], axis=0) / 255
frill = np.dstack((self.frill[:, :, :3] * deco_shade, self.frill[:, :, 3]))
lace = np.dstack((self.lace[:, :, :3] * shade_color, self.lace[:, :, 3]))
# Finalize
textured = bra_base * (1 - self.bra_center[:, :, None]) + center * self.bra_center[:, :, None]
textured = textured - bra_shade
textured = textured * (1 - lace[:, :, 3])[:, :, None] + lace[:, :, :3] * lace[:, :, 3][:, :, None]
textured = textured * (1 - frill[:, :, 3])[:, :, None] + frill[:, :, :3] * frill[:, :, 3][:, :, None]
textured = np.dstack((textured, self.bra_mask))
if self.use_ribbon_mesh is False:
ribbon = skt.rotate(ribbon, 8, resize=True)
ribbon = resize(ribbon, [1.5, 1.5])
[r, c, d] = ribbon.shape
textured[460:460 + r, 35:35 + c] = textured[460:460 + r, 35:35 + c] * (1 - ribbon[:, :, 3][:, :, None]) + ribbon * ribbon[:, :, 3][:, :, None]
return Image.fromarray(np.uint8(np.clip(textured, 0, 1) * 255))
def convert(self, image):
pantie = np.array(image)
[r, c, d] = pantie.shape
# move from hip to front
patch = np.copy(pantie[-140:-5, 546:, :])
patch = skt.resize(patch[::-1, ::-1, :], (270, 63), anti_aliasing=True, mode='reflect')
[pr, pc, d] = patch.shape
pantie[123:123 + pr, :pc, :] = np.uint8(patch * 255)
# Inpainting ribbon
if self.use_ribbon_mesh:
pantie = ribbon_inpaint(pantie)
# Front transform
front = pantie[:390, :250, :]
front = np.pad(front, [(0, 0), (50, 0), (0, 0)], mode='constant')
front = front.transpose(1, 0, 2)
arrx = np.zeros((100))
arry = np.zeros((100))
arrx[40:] -= (np.linspace(0, 1 * np.pi, 60)**2) * 4
arrx[28:70] += (np.sin(np.linspace(0, 1 * np.pi, 100)) * 10)[28:70]
front = affine_transform_by_arr(front, arrx, arry)
front = np.uint8(front.transpose(1, 0, 2) * 255)[:, 38:]
# Back transform
back = pantie[:350, 250:, :]
back = np.pad(back, [(0, 0), (0, 100), (0, 0)], mode='constant')
back = back.transpose(1, 0, 2)
arrx = np.zeros((100))
arry = np.zeros((100))
arrx[10:] -= (np.linspace(0, 1 * np.pi, 90)**3) * 14
back = affine_transform_by_arr(back, arrx, arry, smoothx=True)
back = np.uint8(back.transpose(1, 0, 2) * 255.0)[:, 1:]
# Merge front and back
pantie = np.zeros((np.max((front.shape[0], back.shape[0])), front.shape[1] + back.shape[1], d), dtype=np.uint8)
pantie[:front.shape[0], :front.shape[1]] = front
pantie[:back.shape[0], front.shape[1]:] = back
# main transform
arrx = np.zeros((100))
arry = np.zeros((100))
arrx[35:] += (np.cos(np.linspace(0, 1 * np.pi, 100) - np.pi) * -75)[35:] - 30
arrx[:30] += (np.sin(np.linspace(0, 3 * np.pi, 100) - np.pi / 0.9) * 10)[:30]
arrx[50:80] += (np.sin(np.linspace(0, 3 * np.pi, 100) - np.pi) * 11)[:30]
arry += np.linspace(0, 1, 100) * -50
arry[:30] += (np.sin(np.linspace(0, 3 * np.pi, 100) - np.pi) * 35)[:30]
pantie = affine_transform_by_arr(pantie, arrx, arry, smoothx=True)
pantie = skt.rotate(pantie, 8.1, resize=True)
# Finalize
pantie = resize(pantie, [2.31, 2.35])
pantie = pantie[140:-80, 72:]
pantie = np.uint8(pantie * 255)
pantie = np.bitwise_and(pantie, self.mask)
return Image.fromarray(pantie)
def patch(self, image, transparent=False):
pantie = self.convert(image)
if transparent:
patched = Image.new("RGBA", self.body_size)
else:
patched = self.body.copy()
if self.use_ribbon_mesh:
ribbon = self.gen_ribbon(image)
self.paste(patched, ribbon, self.ribbon_position)
bra = self.gen_bra(image)
patched = self.paste(patched, bra, self.bra_position)
patched = self.paste(patched, pantie, self.pantie_position)
return patched
|
StarcoderdataPython
|
3362479
|
import pathlib
import tempfile
import os
import ray
from ray import workflow
from ray.workflow.storage import set_global_storage
_GLOBAL_MARK_FILE = pathlib.Path(tempfile.gettempdir()) / "__workflow_test"
def unset_global_mark():
if _GLOBAL_MARK_FILE.exists():
_GLOBAL_MARK_FILE.unlink()
def set_global_mark():
_GLOBAL_MARK_FILE.touch()
def check_global_mark():
return _GLOBAL_MARK_FILE.exists()
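# Hedged usage sketch (my addition): the mark file acts as a cross-process flag that
# workflow tests can set in one step and inspect in another.
# unset_global_mark(); assert not check_global_mark()
# set_global_mark();   assert check_global_mark()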
def _alter_storage(new_storage):
set_global_storage(new_storage)
# alter the storage
ray.shutdown()
os.system("ray stop --force")
workflow.init(new_storage)
|
StarcoderdataPython
|
1617143
|
import pytest
import multiprocessing
import contextlib
import redis
from rediscluster.connection import ClusterConnection, ClusterConnectionPool
from redis.exceptions import ConnectionError
from .conftest import _get_client
@contextlib.contextmanager
def exit_callback(callback, *args):
try:
yield
finally:
callback(*args)
class TestMultiprocessing(object):
# Test connection sharing between forks.
# See issue #1085 for details.
# use a multi-connection client as that's the only type that is
# actuall fork/process-safe
@pytest.fixture()
def r(self, request):
return _get_client(
redis.Redis,
request=request,
single_connection_client=False)
def test_close_connection_in_child(self):
"""
A connection owned by a parent and closed by a child doesn't
destroy the file descriptors so a parent can still use it.
"""
conn = ClusterConnection(port=7000)
conn.send_command('ping')
assert conn.read_response() == b'PONG'
def target(conn):
conn.send_command('ping')
assert conn.read_response() == b'PONG'
conn.disconnect()
proc = multiprocessing.Process(target=target, args=(conn,))
proc.start()
proc.join(3)
        assert proc.exitcode == 0
# The connection was created in the parent but disconnected in the
# child. The child called socket.close() but did not call
# socket.shutdown() because it wasn't the "owning" process.
# Therefore the connection still works in the parent.
conn.send_command('ping')
assert conn.read_response() == b'PONG'
def test_close_connection_in_parent(self):
"""
A connection owned by a parent is unusable by a child if the parent
(the owning process) closes the connection.
"""
conn = ClusterConnection(port=7000)
conn.send_command('ping')
assert conn.read_response() == b'PONG'
def target(conn, ev):
ev.wait()
# the parent closed the connection. because it also created the
# connection, the connection is shutdown and the child
# cannot use it.
with pytest.raises(ConnectionError):
conn.send_command('ping')
ev = multiprocessing.Event()
proc = multiprocessing.Process(target=target, args=(conn, ev))
proc.start()
conn.disconnect()
ev.set()
proc.join(3)
        assert proc.exitcode == 0
@pytest.mark.parametrize('max_connections', [1, 2, None])
def test_pool(self, max_connections):
"""
A child will create its own connections when using a pool created
by a parent.
"""
pool = ClusterConnectionPool.from_url('redis://localhost:7000',
max_connections=max_connections)
conn = pool.get_random_connection()
main_conn_pid = conn.pid
with exit_callback(pool.release, conn):
conn.send_command('ping')
assert conn.read_response() == b'PONG'
def target(pool):
with exit_callback(pool.disconnect):
conn = pool.get_random_connection()
assert conn.pid != main_conn_pid
with exit_callback(pool.release, conn):
assert conn.send_command('ping') is None
assert conn.read_response() == b'PONG'
proc = multiprocessing.Process(target=target, args=(pool,))
proc.start()
proc.join(3)
        assert proc.exitcode == 0
# Check that connection is still alive after fork process has exited
# and disconnected the connections in its pool
conn = pool.get_random_connection()
with exit_callback(pool.release, conn):
assert conn.send_command('ping') is None
assert conn.read_response() == b'PONG'
@pytest.mark.parametrize('max_connections', [1, 2, None])
def test_close_pool_in_main(self, max_connections):
"""
A child process that uses the same pool as its parent isn't affected
when the parent disconnects all connections within the pool.
"""
pool = ClusterConnectionPool.from_url('redis://localhost:7000',
max_connections=max_connections)
conn = pool.get_random_connection()
assert conn.send_command('ping') is None
assert conn.read_response() == b'PONG'
def target(pool, disconnect_event):
conn = pool.get_random_connection()
with exit_callback(pool.release, conn):
assert conn.send_command('ping') is None
assert conn.read_response() == b'PONG'
disconnect_event.wait()
assert conn.send_command('ping') is None
assert conn.read_response() == b'PONG'
ev = multiprocessing.Event()
proc = multiprocessing.Process(target=target, args=(pool, ev))
proc.start()
pool.disconnect()
ev.set()
proc.join(3)
        assert proc.exitcode == 0
def test_redis_client(self, r):
"A redis client created in a parent can also be used in a child"
assert r.ping() is True
def target(client):
assert client.ping() is True
del client
proc = multiprocessing.Process(target=target, args=(r,))
proc.start()
proc.join(3)
        assert proc.exitcode == 0
assert r.ping() is True
|
StarcoderdataPython
|
1722609
|
<gh_stars>10-100
from __future__ import absolute_import
import email.utils
from .base import URLSettingsBase, is_importable
class EmailSettings(URLSettingsBase):
CONFIG = {
'smtp': {'EMAIL_BACKEND': 'django.core.mail.backends.smtp.EmailBackend',
'EMAIL_USE_TLS': False},
'smtps': {'EMAIL_BACKEND': 'django.core.mail.backends.smtp.EmailBackend',
'EMAIL_USE_TLS': True},
'file': {'EMAIL_BACKEND': 'django.core.mail.backends.filebased.EmailBackend'},
'mailgun': {'EMAIL_BACKEND': 'django_mailgun.MailgunBackend'},
'sendgrid': {'EMAIL_BACKEND': 'sgbackend.SendGridBackend'},
'mandrill': {'EMAIL_BACKEND': 'djrill.mail.backends.djrill.DjrillBackend'},
'ses': {'EMAIL_BACKEND': 'django_ses_backend.SESBackend'},
'postmark': {'EMAIL_BACKEND': 'postmark.django_backend.EmailBackend'},
}
@staticmethod
def parse_address_list(address_string):
"""
Takes an email address list string and returns a list of (name, address) pairs
"""
return email.utils.getaddresses([address_string])
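    # Hedged usage sketch (hypothetical addresses, my addition):
    # EmailSettings.parse_address_list('Admin <admin@example.com>, ops@example.com')
    # -> [('Admin', 'admin@example.com'), ('', 'ops@example.com')]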
def get_address_list(self, key, default=None):
return self.parse_address_list(self.env.get(key, default))
def handle_smtp_url(self, parsed_url, config):
if config.get('EMAIL_USE_TLS'):
default_port = 587
elif config.get('EMAIL_USE_SSL'):
default_port = 465
else:
default_port = 25
config.update({
'EMAIL_HOST': parsed_url.hostname or 'localhost',
'EMAIL_PORT': parsed_url.port or default_port,
'EMAIL_HOST_USER': parsed_url.username or '',
'EMAIL_HOST_PASSWORD': parsed_url.password or ''})
return config
def handle_smtps_url(self, parsed_url, config):
return self.handle_smtp_url(parsed_url, config)
def handle_file_url(self, parsed_url, config):
if parsed_url.path == '/dev/stdout':
config['EMAIL_BACKEND'] = 'django.core.mail.backends.console.EmailBackend'
elif parsed_url.path == '/dev/null':
config['EMAIL_BACKEND'] = 'django.core.mail.backends.dummy.EmailBackend'
else:
config['EMAIL_FILE_PATH'] = parsed_url.path
return config
def handle_mailgun_url(self, parsed_url, config):
config['MAILGUN_ACCESS_KEY'] = parsed_url.password
config['MAILGUN_SERVER_NAME'] = parsed_url.hostname
return config
def auto_config_mailgun(self, environ):
try:
api_key, login, password, server, port = [
environ['MAILGUN_' + key] for key in (
'API_KEY', 'SMTP_LOGIN', 'SMTP_PASSWORD',
'SMTP_SERVER', 'SMTP_PORT')]
except KeyError:
return
if is_importable(self.CONFIG['mailgun']['EMAIL_BACKEND']):
domain = login.split('@')[-1]
return 'mailgun://api:{api_key}@{domain}'.format(
api_key=api_key, domain=domain)
else:
return 'smtps://{login}:{password}@{server}:{port}'.format(
login=login, password=password, server=server, port=port)
def handle_sendgrid_url(self, parsed_url, config):
config['SENDGRID_USER'] = parsed_url.username
        config['SENDGRID_PASSWORD'] = parsed_url.password
return config
def auto_config_sendgrid(self, environ):
try:
user, password = environ['SENDGRID_USERNAME'], environ['SENDGRID_PASSWORD']
except KeyError:
return
if is_importable(self.CONFIG['sendgrid']['EMAIL_BACKEND']):
return 'sendgrid://{user}:{password}@sendgrid.com'.format(
user=user, password=password)
else:
return 'smtps://{user}:{password}@smtp.sendgrid.net:587'.format(
user=user, password=password)
def handle_mandrill_url(self, parsed_url, config):
config['MANDRILL_API_KEY'] = parsed_url.password
if parsed_url.username:
config['MANDRILL_SUBACCOUNT'] = parsed_url.username
return config
def auto_config_mandrill(self, environ):
try:
user, api_key = environ['MANDRILL_USERNAME'], environ['MANDRILL_APIKEY']
except KeyError:
return
if is_importable(self.CONFIG['mandrill']['EMAIL_BACKEND']):
            return 'mandrill://:{api_key}@mandrillapp.com'.format(
api_key=api_key)
else:
            return 'smtps://{user}:{api_key}@smtp.mandrillapp.com:587'.format(
user=user, api_key=api_key)
def handle_ses_url(self, parsed_url, config):
if parsed_url.username:
config['AWS_SES_ACCESS_KEY_ID'] = parsed_url.username
if parsed_url.password:
config['AWS_SES_SECRET_ACCESS_KEY'] = parsed_url.password
if parsed_url.hostname:
if '.' in parsed_url.hostname:
config['AWS_SES_REGION_ENDPOINT'] = parsed_url.hostname
else:
config['AWS_SES_REGION_NAME'] = parsed_url.hostname
return config
def handle_postmark_url(self, parsed_url, config):
config['POSTMARK_API_KEY'] = parsed_url.password
return config
def auto_config_postmark(self, environ):
try:
api_key, server = (environ['POSTMARK_API_KEY'],
environ['POSTMARK_SMTP_SERVER'])
except KeyError:
return
if is_importable(self.CONFIG['postmark']['EMAIL_BACKEND']):
return 'postmark://user:{api_key}@postmarkapp.com'.format(
api_key=api_key)
else:
return 'smtps://{api_key}:{api_key}@{server}:25'.format(
api_key=api_key, server=server)
|
StarcoderdataPython
|
3387041
|
<reponame>gcewing/PyGUI
#
# Python GUI - Scrollable Views - Gtk
#
import gtk
from GUI import export
from GUI import Scrollable
from GUI.GScrollableViews import ScrollableView as GScrollableView, \
default_extent, default_line_scroll_amount, default_scrolling
class ScrollableView(GScrollableView, Scrollable):
def __init__(self, extent = default_extent,
line_scroll_amount = default_line_scroll_amount,
scrolling = default_scrolling,
**kwds):
gtk_scrolled_window = gtk.ScrolledWindow()
gtk_scrolled_window.show()
GScrollableView.__init__(self, _gtk_outer = gtk_scrolled_window,
extent = extent, line_scroll_amount = line_scroll_amount,
scrolling = scrolling)
self.set(**kwds)
#
# Properties
#
def get_border(self):
        return self._gtk_outer_widget.get_shadow_type() != gtk.SHADOW_NONE
def set_border(self, x):
if x:
s = gtk.SHADOW_IN
else:
s = gtk.SHADOW_NONE
self._gtk_outer_widget.set_shadow_type(s)
def get_content_width(self):
w = self._size[0]
if self.hscrolling:
w -= self.gtk_scrollbar_breadth
if self.border:
w -= 2 * self.gtk_border_thickness[0]
return w
def get_content_height(self):
h = self._size[1]
if self.vscrolling:
h -= self.gtk_scrollbar_breadth
if self.border:
h -= 2 * self.gtk_border_thickness[1]
return h
def get_content_size(self):
return self.content_width, self.content_height
def set_content_size(self, size):
w, h = size
d = self.gtk_scrollbar_breadth
if self.hscrolling:
w += d
if self.vscrolling:
h += d
if self.border:
b = self.gtk_border_thickness
w += 2 * b[0]
h += 2 * b[1]
self.size = (w, h)
def get_extent(self):
return self._gtk_inner_widget.get_size()
def set_extent(self, (w, h)):
self._gtk_inner_widget.set_size(int(round(w)), int(round(h)))
def get_scroll_offset(self):
hadj, vadj = self._gtk_adjustments()
return int(hadj.value), int(vadj.value)
def set_scroll_offset(self, (x, y)):
hadj, vadj = self._gtk_adjustments()
hadj.set_value(min(float(x), hadj.upper - hadj.page_size))
vadj.set_value(min(float(y), vadj.upper - vadj.page_size))
def get_line_scroll_amount(self):
hadj, vadj = self._gtk_adjustments()
return hadj.step_increment, vadj.step_increment
def set_line_scroll_amount(self, (dx, dy)):
hadj, vadj = self._gtk_adjustments()
hadj.step_increment = float(dx) # Amazingly, ints are not
vadj.step_increment = float(dy) # acceptable here.
#
# Internal
#
def _gtk_adjustments(self):
gtk_widget = self._gtk_inner_widget
hadj = gtk_widget.get_hadjustment()
vadj = gtk_widget.get_vadjustment()
return hadj, vadj
export(ScrollableView)
|
StarcoderdataPython
|
67863
|
from distutils.core import setup
from setuptools import find_packages
setup(
name='pyrelate',
version='1.0.0',
author='sendwithus',
author_email='<EMAIL>',
packages=find_packages(),
scripts=[],
url='https://github.com/mrmch/pyrelate',
license='LICENSE.txt',
description='Python API client for relateiq',
long_description=open('README.md').read(),
install_requires=[
"requests >= 1.1.0"
]
)
|
StarcoderdataPython
|
1754389
|
import sys, os
sys.path.append('../python_packages_static')
import zipfile
import numpy as np
import pandas as pd
import pyemu
import flopy.utils as fu
from get_endpoints import get_endpoints
# set path
run_dir = '.'
# get the run index from the command line
runindex = int(sys.argv[1])
# get the correct q ratio from the command line
qrat = sys.argv[2]
# read in modpath parameter ensemble, pst control file to modify for modpath
mp_par = pd.read_csv(os.path.join(run_dir, 'parens{0}.csv'.format(qrat)), index_col=0)
pst = pyemu.Pst(os.path.join(run_dir, 'var_q_template.pst'))
pst.control_data.noptmax = 0
# check that indicies are in same order
assert np.sum(pst.parameter_data.index == mp_par.T.index) == len(pst.parameter_data)
# set parvals using runindex value and write modpath pest file
pst.parameter_data.parval1 = mp_par.iloc[runindex].T
pst.write(os.path.join(run_dir, 'modpath.pst'))
# run pest/modflow to get phi
runstring = './pestpp-ies modpath.pst'
print(runstring)
os.system(runstring)
# get water table array and save as txt file
h = fu.binaryfile.HeadFile(os.path.join(run_dir, 'neversink.hds')).get_data()
wt = fu.postprocessing.get_water_table(h, 1e+30)
np.savetxt('wt_array.txt', wt, fmt='%.4e')
print('wt_array.txt created')
# run modpath
mp_zone_files = ['neversink_mp_forward_weak_NE', 'neversink_mp_forward_weak_W', 'neversink_mp_forward_weak_S']
for zone in mp_zone_files:
runstring = './mp7 {}.mpsim'.format(zone)
print(runstring)
os.system(runstring)
# get endpoints
get_endpoints('{}.mpend'.format(zone), zone[26:])
# zip up results
with zipfile.ZipFile('mp_results_{}_{}.zip'.format(runindex, qrat), 'w', zipfile.ZIP_DEFLATED) as zf:
zf.write('modpath.phi.actual.csv')
zf.write('endpoint_cells_NE.csv')
zf.write('endpoint_cells_W.csv')
zf.write('endpoint_cells_S.csv')
zf.write('wt_array.txt')
|
StarcoderdataPython
|
3366789
|
from .base_encoder import *
from .encoder import *
|
StarcoderdataPython
|
1775008
|
<filename>oct/reconstruct/structure.py
from oct.utils import *
import logging
from ..load.metadata import Metadata
cp, np, convolve, gpuAvailable, freeMemory, e = checkForCupy()
class Structure:
""" Structure contrast OCT reconstruction """
def __init__(self, mode='log'):
acceptedModes = 'log+linear'
if mode in acceptedModes:
self.mode = mode
else:
self.mode = 'log'
self.filter = None
self.initialized = False
self.chCount = 0
self.tomch1 = None
self.tomch2 = None
self.processedData = {
'struct': None
}
meta = Metadata()
self.settings = meta.structureSettings
def initialize(self, data=None, settings=None, filterSize=(5, 5)):
"""
Initialize the structure reconstruction with desired setting
Args:
data (object) : Support and raw data holder
settings (dict) : Manual input settings holder
filterSize (tuple) : filter X & Z sizes
"""
if data and settings:
self.setSettings(data=data, settings=settings)
elif data:
self.setSettings(data=data)
elif settings:
self.setSettings(settings=settings)
        if filterSize is not None:
d1 = cp.hanning(filterSize[0])
d2 = cp.hanning(filterSize[1])
self.filter = cp.sqrt(cp.outer(d1, d2))
self.filter = self.filter / cp.sum(self.filter)
self.initialized = True
logging.info('====================================================')
logging.info('Structure settings initialized:')
for i in self.settings.keys():
logging.info('Key_Name:"{kn}", Key_Value:"{kv}"'.format(kn=i, kv=self.settings[i]))
def setSettings(self, data=None, settings=None):
"""Extract the required settings variables from the dataset metadata"""
if data:
for key, val in data.structureSettings.items():
self.settings[key] = data.structureSettings[key]
elif settings:
for key, val in settings.items():
self.settings[key] = settings[key]
def requires(self):
"""Prints out the required/optional variables to perform reconstruction"""
print('Required:')
print("tomch1=tomch1")
print("\n")
print('Optional:')
print("tomch2=tomch2")
print("\n")
print('OR:')
print("data=data")
print("\n")
print('Settings:')
print("data.structureSettings['contrastLowHigh'] ( [min, max])")
print("data.structureSettings['invertGray'] ( [0 or 1])")
print("data.structureSettings['imgWidth'] (for big-seg scans) ")
print("\n")
print('For reference, all possible settings and defaults are:')
for key, value in self.settings.items():
print("self.settings['", key, "'] : ", value)
def manageChannels(self):
""" Handle case where only channel 2's are passed """
if not (self.tomch2 is None) and (self.tomch1 is None):
self.tomch1 = self.tomch2
self.tomch1 = None
def reconstruct(self, tomch1=None, tomch2=None, data=None, settings=None):
"""
Reconstruct a structure contrast frame
Notes:
Args:
tomch1 (array) : Reconstructed tomogram from channel 1
tomch1 (array) : Reconstructed tomogram from channel 2
data (object) : An object containing all the preloaded data from a directory for post processing
settings (dict): The required/edittable settings to process an angio contrast frame
Returns:
processeData (dict) : A dictionary containing an intensity contrast frame labelled 'struct'
"""
if not self.initialized:
            if data:
self.chCount = 2
self.tomch1 = cp.asarray(data.processedData['tomch1'])
self.tomch2 = cp.asarray(data.processedData['tomch2'])
else:
if not (tomch1 is None):
self.chCount = self.chCount + 1
self.tomch1 = cp.asarray(tomch1)
if not (tomch2 is None):
self.chCount = self.chCount + 1
self.tomch2 = cp.asarray(tomch2)
self.manageChannels()
self.initialize(data=data, settings=settings)
if self.chCount == 2:
struct = self.intensity2Channel()
else:
struct = self.intensity1Channel()
if 'log' in self.mode:
struct = 10 * cp.log(struct)
self.processedData['struct'] = self.formatOut(struct)
elif 'linear' in self.mode:
self.processedData['struct'] = cp.asnumpy(struct)
else:
self.processedData['struct'] = cp.asnumpy(struct)
return self.processedData
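    # Hedged usage sketch (hypothetical tomogram arrays, my addition):
    # s = Structure(mode='log')
    # out = s.reconstruct(tomch1=tom1, tomch2=tom2)  # tom1/tom2: complex tomogram data
    # out['struct']  # uint8 image, clipped to settings['contrastLowHigh']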
def intensity2Channel(self):
"""
Compute Intensity / Structure frame from tomogram
Args:
tomogram (obj): data storage object
Output:
data.processedData['struct']
"""
pch1 = cp.abs(self.tomch1) ** 2
pch2 = cp.abs(self.tomch2) ** 2
if self.settings['imgDepth'] > 1:
pch1 = pch1.reshape([pch1.shape[0],
int(pch1.shape[1]/self.settings['imgDepth']),
self.settings['imgDepth']])
pch2 = pch2.reshape([pch2.shape[0],
int(pch2.shape[1]/self.settings['imgDepth']),
self.settings['imgDepth']])
struct = cp.sum(pch1 + pch2, axis=2) / self.settings['imgDepth']
else:
struct = pch1 + pch2
pch1, pch2 = None, None
return struct
def intensity1Channel(self):
"""
Compute Intensity / Structure frame from tomogram
Args:
data (obj): data storage object
Output:
data.processedData['struct']
"""
pch1 = cp.abs(self.tomch1) ** 2
if self.settings['imgDepth'] > 1:
pch1 = pch1.reshape([pch1.shape[0],
int(pch1.shape[1]/self.settings['imgDepth']),
self.settings['imgDepth']])
struct = cp.sum(pch1, axis=2) / self.settings['imgDepth']
else:
struct = pch1
pch1 = None
return struct
def formatOut(self, struct):
""" Format the output structure array to Uint8, according to contrast settings
Notes:
Args:
struct (array) : Un-formatted float intensity array
Returns:
array : Uint8 (0-255) formatted structure array within contrast range
"""
struct = (struct - self.settings['contrastLowHigh'][0]) / (
self.settings['contrastLowHigh'][1] - self.settings['contrastLowHigh'][0])
struct = cp.clip(struct, a_min=0, a_max=1)
if self.settings['invertGray']:
struct = struct*-1+1
struct = cp.asnumpy((struct * 255).astype('uint8'))
return struct
|
StarcoderdataPython
|
3372212
|
import os
import re
from flask import Response, redirect
from subprocess import PIPE, Popen
from wsgi_utils import PipeWrapper
TRANSCODABLE_FORMATS = ['mp3', 'ogg', 'flac', 'm4a', 'wav']
def _format_of_file(filename):
    return re.search(r'\.([^.]+)$', filename).group(1)
class Transcoder(object):
def __init__(self, music_dir, cache_dir):
self.music_dir = music_dir
self.cache_dir = cache_dir
def needs_transcode(self, filename, wanted_formats):
return _format_of_file(filename) not in wanted_formats
def can_transcode(self, filename, wanted_formats):
return (
_format_of_file(filename) in TRANSCODABLE_FORMATS and
'ogg' in wanted_formats
)
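    # Hedged usage sketch (hypothetical paths, my addition):
    # t = Transcoder('/srv/music', '/var/cache/tx')
    # t.needs_transcode('album/track.flac', ['ogg'])  # True: 'flac' is not a wanted format
    # t.can_transcode('album/track.flac', ['ogg'])    # True: flac can be transcoded to ogg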
def path_for_cache_key(self, cache_key):
return os.path.join(self.cache_dir, 'tx' + cache_key + '.ogg')
def transcode_and_stream(self, filename, cache_key=None):
full_filename = os.path.join(self.music_dir, filename)
cache_filename = None
if cache_key:
cache_filename = self.path_for_cache_key(cache_key)
# See if the transcode is already cached.
try:
os.stat(cache_filename)
return redirect(os.path.join('/', cache_filename))
except OSError:
pass
# TODO: maintain a set of tasks for currently ongoing transcodes
# to avoid transcoding a track twice at the same time. Then try
# sending Accept-Ranges: bytes and honoring range requests, while
# not providing a Content-Length. See if this makes Firefox's
# media file fetching happy.
# Transcode to ogg.
# The filename should come out of the DB and *not* be user-specified
# (through the web interface), so it can be trusted.
command = [
'avconv', '-v', 'quiet',
'-i', full_filename,
'-f', 'ogg', '-acodec', 'libvorbis', '-aq', '5', '-'
]
pipe = Popen(command, stdout=PIPE)
return Response(
PipeWrapper(pipe, copy_to_filename=cache_filename),
mimetype='audio/ogg',
direct_passthrough=True
)
|
StarcoderdataPython
|
4826258
|
import os
os.rename("/home/simon/Programming/python/foo/test.py", "/home/simon/Programming/python/bar/bar/test1.py")
|
StarcoderdataPython
|
3347511
|
<gh_stars>0
"""
CORE APP
This module provides an interface to the app's managers.
"""
from django.db import models
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
# from polymorphic import PolymorphicManager
from tunobase.core import constants, query
# Normal managers
class VersionManager(models.Manager):
def publish_objects(self):
"""Return only published objects."""
queryset = self.exclude(state=constants.STATE_PUBLISHED)
to_publish_ids = []
for obj in queryset:
published_obj = obj.series.versions.filter(
state=constants.STATE_PUBLISHED
).exists()
if not published_obj and obj.content_object.publish_at <= timezone.now():
to_publish_ids.append(obj.pk)
obj.content_object.state = constants.STATE_PUBLISHED
obj.content_object.save()
update_queryset = self.filter(pk__in=to_publish_ids)
update_queryset.update(state=constants.STATE_PUBLISHED)
class CoreManager(models.Manager):
"""Return relevant objects."""
def get_queryset(self):
"""Return objects."""
return query.CoreQuerySet(self.model, using=self._db)
def for_current_site(self):
"""Return objects for the current site."""
return self.get_queryset().for_current_site()
class CoreStateManager(CoreManager):
"""Return relevant objects depending on state."""
def get_queryset(self):
"""Return objects."""
return query.CoreStateQuerySet(self.model, using=self._db)
def publish_objects(self):
"""Return only published objects."""
queryset = self.permitted().filter(
publish_at__lte=timezone.now()
).exclude(state=constants.STATE_PUBLISHED)
queryset.update(state=constants.STATE_PUBLISHED)
def permitted(self):
"""Only return publised objects."""
return self.get_queryset().permitted()
def get_list(self):
return self.get_queryset().get_list()
def get_console_queryset(self):
return self.get_queryset().get_console_queryset()
def version_list(self, object_id, state):
series = self.get_series(object_id)
if series is not None:
qs = series.versions.filter(state=state)
for model in qs:
model.change_url = reverse('%s_%s_change' % (
model.content_object._meta.app_label,
model.content_object._meta.module_name),
args=(model.object_id,)
)
return qs
return []
def get_series(self, object_id):
from tunobase.core.models import Version
model_type = ContentType.objects.get_for_model(self.model)
try:
return Version.objects.get(
content_type__pk=model_type.id,
object_id=object_id
).series
except:
return None
def add_series(self, slug):
from tunobase.core.models import VersionSeries
return VersionSeries.objects.create(
slug=slug
)
def add_version(self, obj):
from tunobase.core.models import Version
model_type = ContentType.objects.get_for_model(self.model)
series = self.add_series(slugify(str(obj)))
Version.objects.create(
content_type=model_type,
object_id=obj.pk,
series=series,
number=1,
state=obj.state
)
def add_to_series(self, series, obj):
from tunobase.core.models import Version
model_type = ContentType.objects.get_for_model(self.model)
try:
latest_version_number = Version.objects.filter(
series=series
).order_by('-number')[0].number + 1
except:
latest_version_number = 1
Version.objects.create(
content_type=model_type,
object_id=obj.pk,
series=series,
number=latest_version_number,
state=constants.STATE_UNPUBLISHED
)
def stage_version(self, object_id):
from tunobase.core.models import Version
series = self.get_series(object_id)
model_type = ContentType.objects.get_for_model(self.model)
if series is not None and Version.objects.filter(
series=series, state=constants.STATE_STAGED).exists():
staged_version = Version.objects.get(
series=series,
state=constants.STATE_STAGED
)
staged_version.state = constants.STATE_UNPUBLISHED
staged_version.save()
staged_version.content_object.state = constants.STATE_UNPUBLISHED
staged_version.content_object.save()
version = Version.objects.get(
content_type__pk=model_type.id,
object_id=object_id
)
version.state = constants.STATE_STAGED
version.save()
version.content_object.state = constants.STATE_STAGED
version.content_object.save()
def publish_version(self, object_id):
from tunobase.core.models import Version
series = self.get_series(object_id)
model_type = ContentType.objects.get_for_model(self.model)
if series is not None and Version.objects.filter(
series=series, state=constants.STATE_PUBLISHED).exists():
published_version = Version.objects.get(
series=series,
state=constants.STATE_PUBLISHED
)
published_version.state = constants.STATE_UNPUBLISHED
published_version.save()
published_version.content_object.state = constants.STATE_UNPUBLISHED
published_version.content_object.save()
version = Version.objects.get(
content_type__pk=model_type.id,
object_id=object_id
)
version.state = constants.STATE_PUBLISHED
version.save()
version.content_object.state = constants.STATE_PUBLISHED
version.content_object.save()
def unpublish_version(self, object_id):
from tunobase.core.models import Version
model_type = ContentType.objects.get_for_model(self.model)
version = Version.objects.get(
content_type__pk=model_type.id,
object_id=object_id
)
version.state = constants.STATE_UNPUBLISHED
version.save()
version.content_object.state = constants.STATE_UNPUBLISHED
version.content_object.publish_date_time = timezone.now()
version.content_object.save()
def delete_version(self, object_id):
from tunobase.core.models import Version
model_type = ContentType.objects.get_for_model(self.model)
version = Version.objects.get(
content_type__pk=model_type.id,
object_id=object_id
)
version.state = constants.STATE_DELETED
version.save()
version.content_object.state = constants.STATE_DELETED
version.content_object.save()
# # Polymorphic Managers
#
# class CorePolymorphicManager(PolymorphicManager, CoreManager):
#
# def get_queryset(self):
# return query.CorePolymorphicQuerySet(self.model, using=self._db)
#
#
# class CorePolymorphicStateManager(CorePolymorphicManager, CoreStateManager):
#
# def get_queryset(self):
# return query.CorePolymorphicStateQuerySet(self.model, using=self._db)
# Other Managers
class DefaultImageManager(CoreStateManager):
def get_queryset(self):
return query.DefaultImageQuerySet(self.model, using=self._db)
def get_random(self, category=None):
return self.get_queryset().get_random(category)
|
StarcoderdataPython
|
3399382
|
<reponame>jorgemauricio/python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 17 16:17:25 2017
@author: jorgemauricio
Instructions
1. Sort the following list of values using loops and/or conditionals
arreglo = [54,26,93,17,77,31,44,55,20]
Result
arreglo = [54,26,93,17,77,31,44,55,20]
arreglo = [17, 20, 26, 31, 44, 54, 55, 77, 93]
"""
|
StarcoderdataPython
|
3352729
|
<filename>python/testData/resolve/UserPyInsteadProvidedPyi/main.py
from pkg import foo
foo.bar("a", "b")
# <ref>
|
StarcoderdataPython
|
1694313
|
<filename>algo/spectral/SlidingWindow.py
# -*- coding: utf-8 -*-
import numpy as np
# Based on: http://stackoverflow.com/a/4947453
# with some adjusts for noverlap
"""
Examples of use:
1-D:
====
a = np.array(range(100), dtype=np.int)
b = SlidingWindow(a, 10, 0) # window_size=10, no overlap
2-D:
====
a = np.array(range(100), dtype=np.int).reshape((10, 10))
b = SlidingWindow(a, (4, 4), 2) # window_size=4 (each dimension), overlap=2
3-D:
====
a = np.array(range(1000), dtype=np.int).reshape((10, 10, 10))
b = SlidingWindow(a, (3, 3, 3), 1) # window_size=3 (each dimension), overlap=1
"""
def rolling_window_lastaxis(a, window_size, noverlap):
if window_size < 1:
raise ValueError("`window_size` must be at least 1.")
if window_size > a.shape[-1]:
raise ValueError("`window_size` is too long.")
if noverlap is None:
noverlap = window_size - 1
step = window_size - noverlap
if noverlap > window_size:
raise Exception('Overlap cannot be greater than window size.')
try:
# // for integer division in Python 3
shape = a.shape[:-1] + ((a.shape[-1] - window_size) // step + 1, window_size)
strides = a.strides[:-1] + (a.strides[-1] * step,) + a.strides[-1:]
ret = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
    except Exception as e:
        print('rolling_window_lastaxis:', e)
        raise
    return ret
def SlidingWindow(a, window_size, noverlap=None):
if not hasattr(window_size, '__iter__'):
return rolling_window_lastaxis(a, window_size, noverlap)
for i, win in enumerate(window_size):
if win > 1:
a = a.swapaxes(i, -1)
a = rolling_window_lastaxis(a, win, noverlap)
a = a.swapaxes(-2, i)
return a
# a = np.array(range(100), dtype=np.int)
# print SlidingWindow(a, 10, 5) # window_size=10, no overlap
|
StarcoderdataPython
|
94625
|
<reponame>bmcs-group/bmcs_beam
from .ex_run import ExRun
from .ex_run_view import ExRunView
|
StarcoderdataPython
|
4808924
|
<reponame>PitPietro/pascal-triangle
"""
> Task
Given a string, find out if its characters can be rearranged to form a palindrome.
> Example
For inputString = "aabb", the output should be true.
We can rearrange "aabb" to make "abba", which is a palindrome.
> Input/Output
- execution time limit: 4 seconds (py3)
- input: string inputString
A string consisting of lowercase English letters.
- guaranteed constraints:
1 ≤ inputString.length ≤ 50.
- output: boolean
true if the characters of the inputString can be rearranged to form a palindrome, false otherwise.
"""
from code_signal_challenges.check_palindrome import check_palindrome
from collections import deque, Counter
def palindrome_rearranging(input_string):
    # The characters can be rearranged into a palindrome iff at most one
    # character occurs an odd number of times.
    odd_counts = sum(1 for count in Counter(input_string).values() if count % 2 != 0)
    return odd_counts <= 1
def palindrome_from(letters):
"""
Forms a palindrome by rearranging :letters: if possible,
throwing a :ValueError: otherwise.
:param letters: a suitable iterable, usually a string
:return: a string containing a palindrome
"""
counter = Counter(letters)
sides = []
center = deque()
for letter, occurrences in counter.items():
repetitions, odd_count = divmod(occurrences, 2)
if not odd_count:
sides.append(letter * repetitions)
continue
if center:
raise ValueError("no palindrome exists for '{}'".format(letters))
center.append(letter * occurrences)
center.extendleft(sides)
center.extend(sides)
return ''.join(center)
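# Hedged usage sketch (my addition; exact output depends on the Counter's insertion order):
# palindrome_from('aabb')  -> 'baab', a valid palindrome
# palindrome_from('abca')  -> raises ValueError (two characters occur an odd number of times)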
if __name__ == '__main__':
# true
test_1 = "aabb"
# false
test_2 = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaabc"
# true
test_3 = "abbcabb"
# true
test_4 = "zyyzzzzz"
# true
test_5 = "z"
# true
test_6 = "zaa"
# false
test_7 = "abca"
# false
test_8 = "abcad"
# false
test_9 = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbccccaaaaaaaaaaaaa"
# false
test_10 = "abdhuierf"
index = 1
print(index, ") ", palindrome_rearranging(test_1))
index += 1
print(index, ") ", palindrome_rearranging(test_2))
index += 1
print(index, ") ", palindrome_rearranging(test_3))
index += 1
print(index, ") ", palindrome_rearranging(test_4))
index += 1
print(index, ") ", palindrome_rearranging(test_5))
index += 1
print(index, ") ", palindrome_rearranging(test_6))
index += 1
print(index, ") ", palindrome_rearranging(test_7))
index += 1
print(index, ") ", palindrome_rearranging(test_8))
index += 1
print(index, ") ", palindrome_rearranging(test_9))
index += 1
print(index, ") ", palindrome_rearranging(test_10))
print()
flag = True
while flag:
try:
word = input('Enter a word: ')
if word == "flag = false":
flag = False
print(palindrome_from(word))
except ValueError as e:
print(*e.args)
except EOFError:
break
|
StarcoderdataPython
|
3238613
|
<reponame>rmed/akamatsu<filename>akamatsu/views/admin/profile.py
# -*- coding: utf-8 -*-
#
# Akamatsu CMS
# https://github.com/rmed/akamatsu
#
# MIT License
#
# Copyright (c) 2020 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module contains user profile views."""
from flask import current_app, flash, redirect, render_template, url_for
from flask_babel import _
from flask_login import current_user, fresh_login_required, login_required
from sqlalchemy.exc import IntegrityError
from akamatsu import crypto_manager, db
from akamatsu.views.admin import bp_admin
from akamatsu.forms import PasswordResetForm, ProfileForm
@bp_admin.route('/profile', methods=['GET', 'POST'])
@login_required
def profile_edit():
"""Show user profile edition form."""
form = ProfileForm(obj=current_user)
if form.validate_on_submit():
form.populate_obj(current_user)
try:
correct = True
db.session.commit()
flash(_('Profile updated correctly'), 'success')
return render_template('admin/profile/edit.html', form=form)
except IntegrityError:
# Email already exists
correct = False
            form.email.errors.append(_('Email is already registered'))
return render_template('admin/profile/edit.html', form=form)
except Exception:
# Catch anything unknown
correct = False
flash(_('Failed to update profile, contact an administrator'), 'error')
return render_template('admin/profile/edit.html', form=form)
finally:
if not correct:
db.session.rollback()
return render_template('admin/profile/edit.html', form=form)
@bp_admin.route('/profile/change-password', methods=['GET', 'POST'])
@fresh_login_required
def change_password():
"""Show form to update user password.
Requires confirming current password.
"""
form = PasswordResetForm()
if form.validate_on_submit():
# Update user
current_user.password = <PASSWORD>(form.password.data)
try:
correct = True
db.session.commit()
flash(_('Password updated correctly'), 'success')
return redirect(url_for('admin.profile_edit'))
except Exception:
correct = False
current_app.logger.exception('Failed to update user password')
flash(_('Error updating password, contact an administrator'), 'error')
return render_template('admin/profile/change_password.html', form=form)
finally:
if not correct:
db.session.rollback()
return render_template('admin/profile/change_password.html', form=form)
|
StarcoderdataPython
|
3330250
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""NuvlaBox Peripheral Manager Bluetooth
This service provides bluetooth device discovery.
"""
import bluetooth as bt
import logging
import sys
import time
import os
import json
import requests
#from bluetooth.ble import DiscoveryService
from threading import Event
scanning_interval = 30
KUBERNETES_SERVICE_HOST = os.getenv('KUBERNETES_SERVICE_HOST')
namespace = os.getenv('MY_NAMESPACE', 'nuvlabox')
def init_logger():
""" Initializes logging """
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s - %(funcName)s - %(message)s')
handler.setFormatter(formatter)
root.addHandler(handler)
def wait_bootstrap(api_url):
"""
Waits for the NuvlaBox to finish bootstrapping, by checking
the context file.
:returns
"""
while True:
try:
logging.info(f'Waiting for {api_url}...')
r = requests.get(api_url + '/healthcheck')
r.raise_for_status()
if r.status_code == 200:
break
except:
time.sleep(15)
logging.info('NuvlaBox has been initialized.')
return
def bluetoothCheck(api_url, mac_addr):
""" Checks if peripheral already exists """
identifier = mac_addr
try:
r = requests.get(f'{api_url}/{identifier}')
if r.status_code == 404:
return False
elif r.status_code == 200:
return True
else:
r.raise_for_status()
except requests.exceptions.InvalidSchema:
logging.error(f'The Agent API URL {api_url} seems to be malformed. Cannot continue...')
raise
except requests.exceptions.ConnectionError as ex:
logging.error(f'Cannot reach out to Agent API at {api_url}. Can be a transient issue: {str(ex)}')
raise
except requests.exceptions.HTTPError as e:
logging.warning(f'Could not lookup peripheral {identifier}. Assuming it does not exist')
return False
def createDeviceFile(device_mac_addr, device_file, peripheral_dir):
file_path = '{}/{}'.format(peripheral_dir, device_mac_addr)
with open(file_path, 'w') as outfile:
json.dump(device_file, outfile)
def removeDeviceFile(device_mac_addr, peripheral_dir):
file_path = '{}/{}'.format(peripheral_dir, device_mac_addr)
os.unlink(file_path)
def readDeviceFile(device_mac_addr, peripheral_dir):
file_path = '{}/{}'.format(peripheral_dir, device_mac_addr)
return json.load(open(file_path))
def deviceDiscovery():
"""
Return all discoverable bluetooth devices.
"""
return bt.discover_devices(lookup_names=True, lookup_class=True)
# def bleDeviceDiscovery():
# service = DiscoveryService("hci0")
# devices = service.discover(2)
# return devices
def compareBluetooth(bluetooth, ble):
output = []
for device in bluetooth:
if device[0] not in ble:
d = {
"identifier": device[0],
"class": device[2],
"interface": "Bluetooth"
}
if device[1] != "":
d["name"] = device[1]
output.append(d)
for device_id, device_name in ble.items():
d = {
"identifier": device_id,
"class": "", # TODO
"interface": "Bluetooth-LE"
}
if device_name != "":
d["name"] = device_name
output.append(d)
return output
def cod_converter(cod_decimal_string):
""" From a decimal value of CoD, map and retrieve the corresponding major class of a Bluetooth device
:param cod_decimal_string: numeric string corresponding to the class of device
:return: list of class(es)
"""
if not cod_decimal_string or cod_decimal_string == "":
return []
cod_decimal_string = int(cod_decimal_string)
# Major CoDs
classes = {0: {'major': 'Miscellaneous',
'minor': {}},
1: {
'major': 'Computer',
'minor': {
'bitwise': False,
'0': 'Uncategorized',
'1': 'Desktop workstation',
'2': 'Server-class computer',
'3': 'Laptop',
'4': 'Handheld PC/PDA (clamshell)',
'5': 'Palm-size PC/PDA',
'6': 'Wearable computer (watch size)',
'7': 'Tablet'}
},
2: {
'major': 'Phone',
'minor': {
'bitwise': False,
'0': 'Uncategorized',
'1': 'Cellular',
'2': 'Cordless',
'3': 'Smartphone',
'4': 'Wired modem or voice gateway',
'5': 'Common ISDN access'
}
},
3: {
'major': 'LAN/Network Access Point',
'minor': {
'bitwise': False,
'0': 'Fully available',
'1': '1% to 17% utilized',
'2': '17% to 33% utilized',
'3': '33% to 50% utilized',
'4': '50% to 67% utilized',
'5': '67% to 83% utilized',
'6': '83% to 99% utilized',
'7': 'No service available'
}
},
4: {
'major': 'Audio/Video',
'minor': {
'bitwise': False,
'0': 'Uncategorized',
'1': 'Wearable Headset Device',
'2': 'Hands-free Device',
'3': '(Reserved)',
'4': 'Microphone',
'5': 'Loudspeaker',
'6': 'Headphones',
'7': 'Portable Audio',
'8': 'Car audio',
'9': 'Set-top box',
'10': 'HiFi Audio Device',
'11': 'VCR',
'12': 'Video Camera',
'13': 'Camcorder',
'14': 'Video Monitor',
'15': 'Video Display and Loudspeaker',
'16': 'Video Conferencing',
'17': '(Reserved)',
'18': 'Gaming/Toy'
}
},
5: {
'major': 'Peripheral',
'minor': {
'bitwise': False,
'feel': {
'0': 'Not Keyboard / Not Pointing Device',
'1': 'Keyboard',
'2': 'Pointing device',
'3': 'Combo keyboard/pointing device'
},
'0': 'Uncategorized',
'1': 'Joystick',
'2': 'Gamepad',
'3': 'Remote control',
'4': 'Sensing device',
'5': 'Digitizer tablet',
'6': 'Card Reader',
'7': 'Digital Pen',
'8': 'Handheld scanner for bar-codes, RFID, etc.',
'9': 'Handheld gestural input device'
}
},
6: {
'major': 'Imaging',
'minor': {
'bitwise': True,
'4': 'Display',
'8': 'Camera',
'16': 'Scanner',
'32': 'Printer'
}
},
7: {
'major': 'Wearable',
'minor': {
'bitwise': False,
'0': 'Wristwatch',
'1': 'Pager',
'2': 'Jacket',
'3': 'Helmet',
'4': 'Glasses'
}
},
8: {
'major': 'Toy',
'minor': {
'bitwise': False,
'0': 'Robot',
'1': 'Vehicle',
'2': 'Doll / Action figure',
'3': 'Controller',
'4': 'Game'
}
},
9: {
'major': 'Health',
'minor': {
'bitwise': False,
'0': 'Undefined',
'1': 'Blood Pressure Monitor',
'2': 'Thermometer',
'3': 'Weighing Scale',
'4': 'Glucose Meter',
'5': 'Pulse Oximeter',
'6': 'Heart/Pulse Rate Monitor',
'7': 'Health Data Display',
'8': 'Step Counter',
'9': 'Body Composition Analyzer',
'10': 'Peak Flow Monitor',
'11': 'Medication Monitor',
'12': 'Knee Prosthesis',
'13': 'Ankle Prosthesis',
'14': 'Generic Health Manager',
'15': 'Personal Mobility Device'
}
}}
major_number = (cod_decimal_string >> 8) & 0x1f
minor_number = (cod_decimal_string >> 2) & 0x3f
minor_class_name = None
minor = {'minor': {}}
if major_number == 31:
major = {'major': 'Uncategorized'}
else:
major = classes.get(major_number, {'major': 'Reserved'})
minor = classes.get(major_number, minor)
minor_class = minor.get('minor', {})
if minor_class.get('bitwise', False):
# i.e. imaging
for key, value in minor_class.items():
try:
# if key is an integer, it is good to be evaluated
minor_key = int(key)
except ValueError:
continue
except:
logging.exception("Failed to evaluate minor device class with key %s" % key)
continue
if minor_number & minor_key:
minor_class_name = value
break
else:
minor_class_name = minor_class.get(str(minor_number), 'reserved')
major_class_name = major.get('major')
peripheral_classes = [major_class_name, minor_class_name]
if 'feel' in minor_class:
feel_number = minor_number >> 4
feel_class_name = minor_class['feel'].get(str(feel_number))
if feel_class_name:
peripheral_classes.append(feel_class_name)
return peripheral_classes
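# Hedged usage sketch (illustrative CoD value, my addition): 1048 decodes to
# major class 4 and minor class 6, so cod_converter(1048) should return
# ['Audio/Video', 'Headphones'].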
def bluetoothManager():
output = {}
try:
# list
bluetoothDevices = deviceDiscovery()
logging.info(bluetoothDevices)
except:
bluetoothDevices = []
logging.exception("Failed to discover BT devices")
bleDevices = {}
# TODO: implement reliable BLE discovery that works for RPi
# try:
# # dict
# bleDevices = bleDeviceDiscovery()
# logging.info(bleDevices)
# except:
# bleDevices = {}
# logging.exception("Failed to discover BLE devices")
# get formatted list of bt devices [{},...]
bluetooth = compareBluetooth(bluetoothDevices, bleDevices)
if len(bluetooth) > 0:
for device in bluetooth:
name = device.get("name", "unknown")
output[device['identifier']] = {
"available": True,
"name": name,
"classes": cod_converter(device.get("class", "")),
"identifier": device.get("identifier"),
"interface": device.get("interface", "Bluetooth"),
}
return output
def diff(before, after):
enter = []
leaving = []
for key in before.keys():
if key not in after.keys():
leaving.append(key)
for key in after.keys():
if key not in before.keys():
enter.append(key)
return enter, leaving
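# Hedged usage sketch (illustrative device maps, my addition):
# diff({'aa:bb': 1}, {'aa:bb': 1, 'cc:dd': 2}) -> (['cc:dd'], [])  (new devices, removed devices)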
def post_peripheral(api_url: str, body: dict) -> dict:
""" Posts a new peripheral into Nuvla, via the Agent API
:param body: content of the peripheral
:param api_url: URL of the Agent API for peripherals
:return: Nuvla resource
"""
try:
r = requests.post(api_url, json=body)
r.raise_for_status()
return r.json()
except:
logging.error(f'Cannot create new peripheral in Nuvla. See agent logs for more details on the problem')
# this will be caught by the calling block
raise
def delete_peripheral(api_url: str, identifier: str, resource_id=None) -> dict:
""" Deletes an existing peripheral from Nuvla, via the Agent API
:param identifier: peripheral identifier (same as local filename)
:param api_url: URL of the Agent API for peripherals
:param resource_id: peripheral resource ID in Nuvla
:return: Nuvla resource
"""
if resource_id:
url = f'{api_url}/{identifier}?id={resource_id}'
else:
url = f'{api_url}/{identifier}'
try:
r = requests.delete(url)
r.raise_for_status()
return r.json()
except:
logging.error(f'Cannot delete peripheral {identifier} from Nuvla. See agent logs for more info about the issue')
# this will be caught by the calling block
raise
def remove_legacy_peripherals(api_url: str, peripherals_dir: str, protocols: list):
""" In previous versions of this component, the peripherals were stored in an incompatible manner.
To avoid duplicates, before starting this component, we make sure all legacy peripherals are deleted
:param api_url: agent api url for peripherals
:param peripherals_dir: path to peripherals dir
:param protocols: list of protocols to look for
:return:
"""
for proto in protocols:
if not proto:
# just to be sure we don't delete the top directory
continue
path = f'{peripherals_dir}{proto}'
if os.path.isdir(path):
for legacy_peripheral in os.listdir(path):
with open(f'{path}/{legacy_peripheral}') as lp:
nuvla_id = json.load(lp).get("resource_id")
# if it has a nuvla_id, there it must be removed from Nuvla
if nuvla_id:
try:
delete_peripheral(api_url, f"{proto}/{legacy_peripheral}", resource_id=nuvla_id)
continue
except:
pass
logging.info(f'Removed legacy peripheral {proto}/{legacy_peripheral}. If it still exists, it shall be re-created.')
os.remove(f'{path}/{legacy_peripheral}')
# by now, dir must be empty, so this shall work
os.rmdir(path)
logging.info(f'Removed all legacy peripherals for interface {proto}: {path}')
def get_saved_peripherals(api_url, protocol):
"""
To be used at bootstrap, to check for existing peripherals, just to make sure we delete old and only insert new
peripherals, that have been modified during the NuvlaBox shutdown
:param api_url: url of the agent api for peripherals
:param protocol: protocol name = interface
:return: map of device identifiers and content
"""
query = f'{api_url}?parameter=interface&value={protocol}'
r = requests.get(query)
r.raise_for_status()
return r.json()
if __name__ == "__main__":
init_logger()
logging.info('BLUETOOTH MANAGER STARTED')
e = Event()
peripheral_path = '/srv/nuvlabox/shared/.peripherals/'
agent_api_endpoint = 'localhost:5080' if not KUBERNETES_SERVICE_HOST else f'agent.{namespace}'
base_api_url = f"http://{agent_api_endpoint}/api"
API_URL = f"{base_api_url}/peripheral"
wait_bootstrap(base_api_url)
remove_legacy_peripherals(API_URL, peripheral_path, ["bluetooth"])
old_devices = get_saved_peripherals(API_URL, 'Bluetooth')
while True:
current_devices = bluetoothManager()
logging.info('CURRENT DEVICES: {}'.format(current_devices))
if current_devices != old_devices:
publishing, removing = diff(old_devices, current_devices)
for device in publishing:
peripheral_already_registered = bluetoothCheck(API_URL, device)
if not peripheral_already_registered:
logging.info('PUBLISHING: {}'.format(current_devices[device]))
try:
resource = post_peripheral(API_URL, current_devices[device])
except Exception as ex:
logging.error(f'Unable to publish peripheral {device}: {str(ex)}')
continue
old_devices[device] = current_devices[device]
for device in removing:
logging.info('REMOVING: {}'.format(old_devices[device]))
peripheral_already_registered = bluetoothCheck(API_URL, device)
if peripheral_already_registered:
try:
resource = delete_peripheral(API_URL, device)
except:
logging.exception(f'Cannot delete {device} from Nuvla')
continue
else:
logging.warning(f'Peripheral {device} seems to have been removed already')
del old_devices[device]
e.wait(timeout=scanning_interval)
|
StarcoderdataPython
|
1722290
|
# Generated by Django 3.1.6 on 2021-03-01 11:48
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('website', '0003_mgrssquare'),
]
operations = [
migrations.RemoveField(
model_name='mgrssquare',
name='precision',
),
]
|
StarcoderdataPython
|
3267463
|
import numpy
print(dir(numpy))
|
StarcoderdataPython
|
1686780
|
<reponame>MarkGhebrial/advent-of-code-2021
def binaryToInt(string: str, oneChar="1", zeroChar="0"):
out = 0
for i in range(len(string)):
        if string[len(string) - 1 - i] == oneChar:
            currentDigit = 1
        elif string[len(string) - 1 - i] == zeroChar:
            currentDigit = 0
        else:
            raise ValueError("unexpected character in input: " + string[len(string) - 1 - i])
out += (2**i) * currentDigit
    return out
if __name__ == "__main__":
print(binaryToInt("1011"))
|
StarcoderdataPython
|
1796228
|
from django.db import models
# from django.template.defaultfilters import slugify
'''
def app_source_path(instance, filename):
return '{0}/app_{1}/{2}/{3}'.format(slugify(instance.app.author), slugify(instance.app.name), instance.version, filename)
def app_destiny_path(instance, filename):
return '{0}/app_{1}/{2}/{3}/{4}'.format(slugify(instance.app.author), slugify(instance.app.name),instance.version, "build", filename)
'''
class App(models.Model):
name = models.CharField(max_length=120, unique=True, blank=False)
description = models.TextField(blank=True)
author = models.CharField(max_length=120)
def save(self, *args, **kwargs):
print('save() is called.')
super(App, self).save(*args, **kwargs)
def __unicode__(self):
return self.name
def __str__(self):
return self.name
class Version(models.Model):
app = models.ForeignKey(App)
version = models.CharField(max_length=20)
def save(self, *args, **kwargs):
print('save() is called.')
super(Version, self).save(*args, **kwargs)
git_url = models.URLField(default="https://github.com/Boquete/electron-online-example.git")
# source_local = models.FileField(upload_to=app_source_path, blank=True)
destiny_git_url = models.URLField(default="https://github.com/Boquete/electron-online-example.git")
# destiny_local = models.FilePathField(path=app_destiny_path, editable=False)
config_editor = models.TextField(blank=True)
config_file = models.URLField(default="https://github.com/Boquete/electron-online-example/blob/master/electron_config.js", blank=True)
def __unicode__(self):
return self.version
def __str__(self):
return self.version
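# Illustrative usage (a sketch, assuming a configured Django project with these migrations applied):
#   app = App.objects.create(name="example-app", author="someone")
#   Version.objects.create(app=app, version="0.1.0")
# Note that this ForeignKey style targets Django < 2.0; newer releases would also require an
# explicit on_delete argument, e.g. models.ForeignKey(App, on_delete=models.CASCADE).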
|
StarcoderdataPython
|
1682003
|
import sqlite3
conn = sqlite3.connect('spider.sqlite')
cur = conn.cursor()
cur.execute('SELECT * FROM Twitter')
count = 0
for row in cur:
    print(row)
    count = count + 1
print(count, 'rows.')
cur.close()
|
StarcoderdataPython
|
3335456
|
<reponame>taku-ito/nlp100.github.io<filename>tools/extract_country_names.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import sys
import re
d = json.load(sys.stdin)
lines = d['text'].split('\n')
for line in lines:
    # |{{flagicon|...}} [[ ... ]] for Japanese Wikipedia
    # | {{flagdeco|...}} [[ ... ]] for English Wikipedia
m = re.match(r'\|\s*\{\{(flagicon|flagdeco)\|[^}]+\}\}\s*\[\[([^\]]+)\]\]', line)
if m is not None:
link = m.group(2)
fields = link.split('|')
print(fields[0])
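# Example invocation (illustrative; the input file name is an assumption):
#   python extract_country_names.py < countries_page.json
# where the JSON on stdin is a Wikipedia page export whose 'text' field holds the article wikitext.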
|
StarcoderdataPython
|
122041
|
<reponame>geostarling/duct<filename>duct/protocol/sflow/protocol/protocol.py
"""
.. module:: protocol
:synopsis: SFlow protocol
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import xdrlib
from duct.protocol.sflow.protocol import flows, counters
class Sflow(object):
"""SFlow protocol stream decoder
"""
def __init__(self, payload, host):
self.host = host
assert isinstance(payload, bytes)
u = xdrlib.Unpacker(payload)
self.version = u.unpack_uint()
self.samplers = {
1: FlowSample,
2: CounterSample
}
self.samples = []
self.sample_count = 0
if self.version == 5:
self.sflow_v5(u)
def sflow_v5(self, u):
"""SFlow version 5 decoder
"""
self.addrtype = u.unpack_uint()
if self.addrtype == 1:
self.address = u.unpack_fstring(4)
if self.addrtype == 2:
self.address = u.unpack_fstring(16)
self.sub_agent_id = u.unpack_uint()
self.sequence_number = u.unpack_uint()
self.uptime = u.unpack_uint()
self.sample_count = u.unpack_uint()
self.decode_samples(u)
# Sort samples by sequence number
self.samples.sort(key=lambda x: x.sequence)
def decode_samples(self, u):
"""Decode samples received
"""
for _i in range(self.sample_count):
sample_type = u.unpack_uint()
self.samples.append(self.samplers[sample_type](u))
class FlowSample(object):
"""Flow sample object
"""
def __init__(self, u):
self.size = u.unpack_uint()
self.sequence = u.unpack_uint()
self.source_id = u.unpack_uint()
self.sample_rate = u.unpack_uint()
self.sample_pool = u.unpack_uint()
self.dropped_packets = u.unpack_uint()
self.if_inIndex = u.unpack_uint()
self.if_outIndex = u.unpack_uint()
self.record_count = u.unpack_uint()
self.flows = {}
for _i in range(self.record_count):
flow_format = u.unpack_uint()
flow_head = u.unpack_opaque()
flow_u = xdrlib.Unpacker(flow_head)
d = flows.getDecoder(flow_format)
if d:
self.flows[flow_format] = d(flow_u)
class CounterSample(object):
"""Counter sample object
"""
def __init__(self, u):
self.size = u.unpack_uint()
self.sequence = u.unpack_uint()
self.source_id = u.unpack_uint()
self.record_count = u.unpack_uint()
self.counters = {}
for _i in range(self.record_count):
counter_format = u.unpack_uint()
counter = u.unpack_opaque()
d = counters.getDecoder(counter_format)
if d:
self.counters[counter_format] = d(xdrlib.Unpacker(counter))
else:
print("Unknown format:", counter_format)
|
StarcoderdataPython
|
59876
|
from __future__ import print_function, absolute_import
import os.path as osp
import numpy as np
from ..utils.data import Dataset
from ..utils.osutils import mkdir_if_missing
from ..utils.serialization import write_json, read_json
from ..utils.data.dataset import _pluck
class SynergyReID(Dataset):
md5 = '05050b5d9388563021315a81b531db7d'
def __init__(self, root, split_id=0, num_val=100, download=True):
super(SynergyReID, self).__init__(root, split_id=split_id)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. " +
"You can use download=True to download it.")
self.load(num_val)
def download(self):
if self._check_integrity():
print("Files already downloaded and verified")
return
import hashlib
import shutil
from glob import glob
from zipfile import ZipFile
raw_dir = osp.join(self.root, 'raw')
mkdir_if_missing(raw_dir)
# Open the raw zip file
fpath = osp.join(raw_dir, 'synergyreid_data.zip')
if osp.isfile(fpath) and \
hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.md5:
print("Using downloaded file: " + fpath)
else:
raise RuntimeError("Please move data to {} "
.format(fpath))
# Extract the file
exdir = osp.join(raw_dir, 'data_reid')
if not osp.isdir(exdir):
print("Extracting zip file")
with ZipFile(fpath) as z:
z.extractall(path=raw_dir)
# Format
images_dir = osp.join(self.root, 'images')
mkdir_if_missing(images_dir)
# 487 identities (+1 for background) with 2 camera views each
# Here we use the convention that camera 0 is for query and
# camera 1 is for gallery
identities = [[[] for _ in range(2)] for _ in range(487)]
def register(subdir):
fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpeg')))
pids = set()
for fpath in fpaths:
fname = osp.basename(fpath)
pid = int(fname.split('_')[0])
cam = 1 if 'gallery' in subdir else 0
pids.add(pid)
fname = ('{:08d}_{:02d}_{:04d}.jpg'
.format(pid, cam, len(identities[pid][cam])))
identities[pid][cam].append(fname)
shutil.copy(fpath, osp.join(images_dir, fname))
return pids
trainval_pids = register('reid_training')
query_val_pids = register('reid_val/query')
gallery_val_pids = register('reid_val/gallery')
assert query_val_pids <= gallery_val_pids
assert trainval_pids.isdisjoint(query_val_pids)
identities_test = [[[] for _ in range(2)] for _ in range(9172)]
def register_test(subdir, n=0):
fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpeg')))
pids = set()
for pindx, fpath in enumerate(fpaths):
fname = osp.basename(fpath)
pid = int(fname.split('.')[0])
cam = 1 if 'gallery' in subdir else 0
pids.add(pid)
fname = ('{:08d}_{:02d}_{:04d}.jpg'
.format(pid, cam, 0))
identities_test[pindx+n][cam].append(fname)
shutil.copy(fpath, osp.join(images_dir, fname))
return pids
query_test_pids = register_test('reid_test/query')
gallery_test_pids = register_test('reid_test/gallery',
n=len(query_test_pids))
# Save the training / val / test splits
splits = [{
'trainval': sorted(list(trainval_pids)),
'query_val': sorted(list(query_val_pids)),
'gallery_val': sorted(list(gallery_val_pids)),
'query_test': sorted(list(query_test_pids)),
'gallery_test': sorted(list(gallery_test_pids))}]
write_json(splits, osp.join(self.root, 'splits.json'))
# Save meta information into a json file
meta = {'name': 'SynergyReID', 'shot': 'multiple', 'num_cameras': 2,
'identities': identities, 'identities_test': identities_test}
write_json(meta, osp.join(self.root, 'meta.json'))
    def load(self, num_val=100, verbose=True):
        # num_val is accepted because __init__ passes it, but it is unused here: this dataset
        # ships an explicit query_val/gallery_val split instead of carving one out of trainval
splits = read_json(osp.join(self.root, 'splits.json'))
if self.split_id >= len(splits):
raise ValueError("split_id exceeds total splits {}"
.format(len(splits)))
self.split = splits[self.split_id]
trainval_pids = np.concatenate((np.asarray(self.split['trainval']),
np.asarray(self.split['query_val'])))
def _pluck_val(identities, indices, relabel=False, cam=0):
ret = []
for index, pid in enumerate(indices):
pid_images = identities[pid]
for camid, cam_images in enumerate(pid_images):
if camid == cam:
for fname in cam_images:
name = osp.splitext(fname)[0]
x, y, _ = map(int, name.split('_'))
assert pid == x and camid == y
if relabel:
ret.append((fname, index, camid))
else:
ret.append((fname, pid, camid))
return ret
def _pluck_test(identities, indices, n=0):
ret = []
for index, pid in enumerate(indices):
pid_images = identities[index+n]
for camid, cam_images in enumerate(pid_images):
for fname in cam_images:
ret.append((fname, pid, camid))
return ret
self.meta = read_json(osp.join(self.root, 'meta.json'))
identities = self.meta['identities']
identities_test = self.meta['identities_test']
self.train = _pluck(identities, self.split['trainval'], relabel=True)
self.trainval = _pluck(identities, trainval_pids, relabel=True)
self.query_val = _pluck_val(identities, self.split['query_val'], cam=0)
self.gallery_val = _pluck_val(identities, self.split['gallery_val'], cam=1)
self.query_test = _pluck_test(identities_test, self.split['query_test'])
self.gallery_test = _pluck_test(identities_test, self.split['gallery_test'], n=len(self.split['query_test']))
self.num_train_ids = len(self.split['trainval'])
self.num_val_ids = len(self.split['query_val'])
self.num_trainval_ids = len(trainval_pids)
if verbose:
print(self.__class__.__name__, "dataset loaded")
print(" subset | # ids | # images")
print(" ---------------------------")
print(" train | {:5d} | {:8d}"
.format(self.num_train_ids, len(self.train)))
print(" query val | {:5d} | {:8d}"
.format(len(self.split['query_val']), len(self.query_val)))
print(" gallery val | {:5d} | {:8d}"
.format(len(self.split['gallery_val']), len(self.gallery_val)))
print(" trainval | {:5d} | {:8d}"
.format(self.num_trainval_ids, len(self.trainval)))
print(" ---------------------------")
print(" query test | {:5d} | {:8d}"
.format(len(self.split['query_test']), len(self.query_test)))
print(" gallery test | {:5d} | {:8d}"
.format(len(self.split['gallery_test']), len(self.gallery_test)))
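# Illustrative usage (a sketch; assumes 'synergyreid_data.zip' has already been placed under <root>/raw,
# since download() only verifies and extracts a local archive):
#   dataset = SynergyReID('/data/synergy-reid', split_id=0, download=True)
#   queries, gallery = dataset.query_test, dataset.gallery_test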
|
StarcoderdataPython
|